[Clang][XTHeadVector] support vssra/vssrl (#126)
* [Clang][XTHeadVector] support `vssra/vssrl`

* [Clang][XTHeadVector] support `vssra/vssrl` wrappers

* [Clang][XTHeadVector] add corresponding tests
imkiva authored Jul 10, 2024
1 parent 6c3eb2b commit 94384bb
Showing 6 changed files with 2,735 additions and 0 deletions.
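
For readers unfamiliar with the wrapper layer: the macros added below forward the standard RVV intrinsic spellings (__riscv_vssra_*, __riscv_vssrl_*) to the XTHeadVector builtins (__riscv_th_vssra_*, __riscv_th_vssrl_*), so code written against the standard names can compile unchanged. A minimal usage sketch, not part of this commit, assuming the wrappers are reached through <riscv_vector.h> and that the standard rounding-mode constant __RISCV_VXRM_RNU is available (otherwise pass the raw value 0 for rm):

#include <riscv_vector.h>

// Scaling shift right with rounding: each element of v is shifted right
// by `shift`, and the discarded bits are rounded according to the vxrm
// argument (round-to-nearest-up here). vssra is the arithmetic/signed form.
vint32m1_t scale_signed(vint32m1_t v, size_t shift, size_t vl) {
  return __riscv_vssra_vx_i32m1(v, shift, __RISCV_VXRM_RNU, vl);
}

// vssrl is the logical/unsigned form; the vv variant takes per-element
// shift amounts from a second vector.
vuint32m1_t scale_unsigned(vuint32m1_t v, vuint32m1_t shifts, size_t vl) {
  return __riscv_vssrl_vv_u32m1(v, shifts, __RISCV_VXRM_RNU, vl);
}
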
clang/include/clang/Basic/riscv_vector_xtheadv.td (3 additions, 0 deletions)
@@ -1412,6 +1412,9 @@ let UnMaskedPolicyScheme = HasPassthruOperand,
defm th_vasub : RVVSignedBinBuiltinSetRoundingMode;
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation Operations
defm th_vsmul : RVVSignedBinBuiltinSetRoundingMode;
// 13.4. Vector Single-Width Scaling Shift Instructions
defm th_vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
defm th_vssra : RVVSignedShiftBuiltinSetRoundingMode;
}

// 13.5 Vector Narrowing Fixed-Point Clip Operations
clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td (141 additions, 0 deletions)
@@ -3044,6 +3044,147 @@ let HeaderCode =
}] in
def th_single_width_fractional_multiply_with_rounding_and_saturation_wrapper_macros: RVVHeader;


// 13.4. Vector Single-Width Scaling Shift Operations

let HeaderCode =
[{

#define __riscv_vssra_vv_i8m1(op1, shift, rm, vl) __riscv_th_vssra_vv_i8m1(op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m1(op1, shift, rm, vl) __riscv_th_vssra_vx_i8m1(op1, shift, rm, vl)
#define __riscv_vssra_vv_i8m2(op1, shift, rm, vl) __riscv_th_vssra_vv_i8m2(op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m2(op1, shift, rm, vl) __riscv_th_vssra_vx_i8m2(op1, shift, rm, vl)
#define __riscv_vssra_vv_i8m4(op1, shift, rm, vl) __riscv_th_vssra_vv_i8m4(op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m4(op1, shift, rm, vl) __riscv_th_vssra_vx_i8m4(op1, shift, rm, vl)
#define __riscv_vssra_vv_i8m8(op1, shift, rm, vl) __riscv_th_vssra_vv_i8m8(op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m8(op1, shift, rm, vl) __riscv_th_vssra_vx_i8m8(op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m1(op1, shift, rm, vl) __riscv_th_vssra_vv_i16m1(op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m1(op1, shift, rm, vl) __riscv_th_vssra_vx_i16m1(op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m2(op1, shift, rm, vl) __riscv_th_vssra_vv_i16m2(op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m2(op1, shift, rm, vl) __riscv_th_vssra_vx_i16m2(op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m4(op1, shift, rm, vl) __riscv_th_vssra_vv_i16m4(op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m4(op1, shift, rm, vl) __riscv_th_vssra_vx_i16m4(op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m8(op1, shift, rm, vl) __riscv_th_vssra_vv_i16m8(op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m8(op1, shift, rm, vl) __riscv_th_vssra_vx_i16m8(op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m1(op1, shift, rm, vl) __riscv_th_vssra_vv_i32m1(op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m1(op1, shift, rm, vl) __riscv_th_vssra_vx_i32m1(op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m2(op1, shift, rm, vl) __riscv_th_vssra_vv_i32m2(op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m2(op1, shift, rm, vl) __riscv_th_vssra_vx_i32m2(op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m4(op1, shift, rm, vl) __riscv_th_vssra_vv_i32m4(op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m4(op1, shift, rm, vl) __riscv_th_vssra_vx_i32m4(op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m8(op1, shift, rm, vl) __riscv_th_vssra_vv_i32m8(op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m8(op1, shift, rm, vl) __riscv_th_vssra_vx_i32m8(op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m1(op1, shift, rm, vl) __riscv_th_vssra_vv_i64m1(op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m1(op1, shift, rm, vl) __riscv_th_vssra_vx_i64m1(op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m2(op1, shift, rm, vl) __riscv_th_vssra_vv_i64m2(op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m2(op1, shift, rm, vl) __riscv_th_vssra_vx_i64m2(op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m4(op1, shift, rm, vl) __riscv_th_vssra_vv_i64m4(op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m4(op1, shift, rm, vl) __riscv_th_vssra_vx_i64m4(op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m8(op1, shift, rm, vl) __riscv_th_vssra_vv_i64m8(op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m8(op1, shift, rm, vl) __riscv_th_vssra_vx_i64m8(op1, shift, rm, vl)

#define __riscv_vssra_vv_i8m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i8m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i8m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i8m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i8m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i16m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i16m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i32m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i32m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vv_i64m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssra_vx_i64m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, rm, vl)

#define __riscv_vssrl_vv_u8m1(op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m1(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m1(op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m1(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u8m2(op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m2(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m2(op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m2(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u8m4(op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m4(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m4(op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m4(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u8m8(op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m8(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m8(op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m8(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m1(op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m1(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m1(op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m1(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m2(op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m2(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m2(op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m2(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m4(op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m4(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m4(op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m4(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m8(op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m8(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m8(op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m8(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m1(op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m1(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m1(op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m1(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m2(op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m2(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m2(op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m2(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m4(op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m4(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m4(op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m4(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m8(op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m8(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m8(op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m8(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m1(op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m1(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m1(op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m1(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m2(op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m2(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m2(op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m2(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m4(op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m4(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m4(op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m4(op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m8(op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m8(op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m8(op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m8(op1, shift, rm, vl)

#define __riscv_vssrl_vv_u8m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u8m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u8m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u8m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u8m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u8m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u16m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u16m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u32m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u32m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m1_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m2_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m4_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vv_u64m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, rm, vl)
#define __riscv_vssrl_vx_u64m8_m(mask, maskedoff, op1, shift, rm, vl) __riscv_th_vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, rm, vl)

}] in
def th_single_width_scaling_shift_wrapper_macros: RVVHeader;

// 13.5. Vector Narrowing Fixed-Point Clip Operations

let HeaderCode =
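
One detail worth noting in the wrappers above: the standard _m spelling is routed to the _mu (mask-undisturbed) builtin, so inactive destination elements take the value supplied in maskedoff. A hedged sketch of a masked call, under the same header and constant-name assumptions as the earlier example:

#include <riscv_vector.h>

// Masked scaling shift: elements whose mask bit is set are shifted and
// rounded; elements whose mask bit is clear are copied from `maskedoff`,
// since the _m wrapper expands to the mask-undisturbed (_mu) builtin.
vuint16m2_t scale_masked(vbool8_t mask, vuint16m2_t maskedoff,
                         vuint16m2_t v, size_t shift, size_t vl) {
  return __riscv_vssrl_vx_u16m2_m(mask, maskedoff, v, shift,
                                  __RISCV_VXRM_RNU, vl);
}
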
