diff --git a/README.md b/README.md
index ad990691c54dcc..70fcf0b2d4eba2 100644
--- a/README.md
+++ b/README.md
@@ -39,6 +39,10 @@ Any feature not listed below but present in the specification should be consider
     - (Done) `vand.{vv,vx,vi}`
     - (Done) `vor.{vv,vx,vi}`
     - (Done) `vxor.{vv,vx,vi}`
+  - (Done) `12.5. Vector Single-Width Bit Shift Instructions`
+    - (Done) `vsll.{vv,vx,vi}`
+    - (Done) `vsrl.{vv,vx,vi}`
+    - (Done) `vsra.{vv,vx,vi}`
 - (WIP) Clang intrinsics related to the `XTHeadVector` extension:
   - (WIP) `6. Configuration-Setting and Utility`
     - (Done) `6.1. Set vl and vtype`
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td
index 91a1a4b9d52048..5ea855df71a04c 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td
@@ -586,6 +586,18 @@ let TargetPrefix = "riscv" in {
     let VLOperand = 4;
   }
 
+  // For destination vector type is the same as
+  // first source vector (with mask but no policy).
+  // The second source operand must match the destination type or be an XLen scalar.
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+  class XVBinaryAAShiftMasked
+      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                              [IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 4;
+  }
+
   multiclass XVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
     def "int_riscv_" # NAME # "_mask" : XVBinaryAAXMasked;
@@ -595,6 +607,11 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
     def "int_riscv_" # NAME # "_mask" : XVBinaryABXMasked;
   }
+
+  multiclass XVBinaryAAShift {
+    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
+    def "int_riscv_" # NAME # "_mask" : XVBinaryAAShiftMasked;
+  }
 }
 
 let TargetPrefix = "riscv" in {
@@ -624,6 +641,11 @@ let TargetPrefix = "riscv" in {
   defm th_vand : XVBinaryAAX;
   defm th_vor : XVBinaryAAX;
   defm th_vxor : XVBinaryAAX;
+
+  // 12.5. Vector Single-Width Bit Shift Instructions
+  defm th_vsll : XVBinaryAAShift;
+  defm th_vsrl : XVBinaryAAShift;
+  defm th_vsra : XVBinaryAAShift;
 } // TargetPrefix = "riscv"
 
 let TargetPrefix = "riscv" in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td
index bd06cc79150a2e..4ddca578683a99 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td
@@ -1789,6 +1789,24 @@ multiclass XVPseudoVCALUM_VM_XM {
   }
 }
 
+multiclass XVPseudoVSHT_VV_VX_VI {
+  foreach m = MxListXTHeadV in {
+    defvar mx = m.MX;
+    defvar WriteVShiftV_MX = !cast<SchedWrite>("WriteVShiftV_" # mx);
+    defvar WriteVShiftX_MX = !cast<SchedWrite>("WriteVShiftX_" # mx);
+    defvar WriteVShiftI_MX = !cast<SchedWrite>("WriteVShiftI_" # mx);
+    defvar ReadVShiftV_MX = !cast<SchedRead>("ReadVShiftV_" # mx);
+    defvar ReadVShiftX_MX = !cast<SchedRead>("ReadVShiftX_" # mx);
+
+    defm "" : XVPseudoBinaryV_VV<m>,
+              Sched<[WriteVShiftV_MX, ReadVShiftV_MX, ReadVShiftV_MX, ReadVMask]>;
+    defm "" : XVPseudoBinaryV_VX<m>,
+              Sched<[WriteVShiftX_MX, ReadVShiftV_MX, ReadVShiftX_MX, ReadVMask]>;
+    defm "" : XVPseudoBinaryV_VI<uimm5, m>,
+              Sched<[WriteVShiftI_MX, ReadVShiftV_MX, ReadVMask]>;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns for the XTHeadVector extension.
//===----------------------------------------------------------------------===// @@ -2207,6 +2225,48 @@ let Predicates = [HasVendorXTHeadV] in { defm : XVPatBinaryV_VV_VX_VI<"int_riscv_th_vxor", "PseudoTH_VXOR", AllIntegerXVectors>; } +//===----------------------------------------------------------------------===// +// 12.5. Vector Single-Width Bit Shift Instructions +//===----------------------------------------------------------------------===// +let Predicates = [HasVendorXTHeadV] in { + defm PseudoTH_VSLL : XVPseudoVSHT_VV_VX_VI; + defm PseudoTH_VSRL : XVPseudoVSHT_VV_VX_VI; + defm PseudoTH_VSRA : XVPseudoVSHT_VV_VX_VI; +} // Predicates = [HasVendorXTHeadV] + +let Predicates = [HasVendorXTHeadV] in { + defm : XVPatBinaryV_VV_VX_VI<"int_riscv_th_vsll", "PseudoTH_VSLL", AllIntegerXVectors, + uimm5>; + defm : XVPatBinaryV_VV_VX_VI<"int_riscv_th_vsrl", "PseudoTH_VSRL", AllIntegerXVectors, + uimm5>; + defm : XVPatBinaryV_VV_VX_VI<"int_riscv_th_vsra", "PseudoTH_VSRA", AllIntegerXVectors, + uimm5>; + + foreach vti = AllIntegerXVectors in { + // Emit shift by 1 as an add since it might be faster. + let Predicates = GetXVTypePredicates.Predicates in { + def : Pat<(vti.Vector (int_riscv_th_vsll (vti.Vector undef), + (vti.Vector vti.RegClass:$rs1), + (XLenVT 1), VLOpFrag)), + (!cast("PseudoTH_VADD_VV_"#vti.LMul.MX) + (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, + vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (int_riscv_th_vsll_mask (vti.Vector vti.RegClass:$merge), + (vti.Vector vti.RegClass:$rs1), + (XLenVT 1), + (vti.Mask V0), + VLOpFrag)), + (!cast("PseudoTH_VADD_VV_"#vti.LMul.MX#"_MASK") + vti.RegClass:$merge, + vti.RegClass:$rs1, + vti.RegClass:$rs1, + (vti.Mask V0), + GPR:$vl, + vti.Log2SEW)>; + } + } +} // Predicates = [HasVendorXTHeadV] + //===----------------------------------------------------------------------===// // 12.14. 
Vector Integer Merge and Move Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vsll.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vsll.ll new file mode 100644 index 00000000000000..7ac6ea68b347cb --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vsll.ll @@ -0,0 +1,3150 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s + +declare @llvm.riscv.th.vsll.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv16i8.nxv16i8( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv32i8.nxv32i8( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv64i8.nxv64i8( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: 
th.vsll.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vsll.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv4i16.nxv4i16( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv8i16.nxv8i16( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv16i16.nxv16i16( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, 
v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv32i16.nxv32i16( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e16, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vsll.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv2i32.nxv2i32( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; 
CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv4i32.nxv4i32( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv8i32.nxv8i32( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype 
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv16i32.nxv16i32( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e32, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vsll.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv1i64.nxv1i64( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.th.vsll.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv2i64.nxv2i64( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsll.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv4i64.nxv4i64( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: 
th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsll.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv8i64.nxv8i64( + , + , + , + iXLen); + +define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsll.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen); + +define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e64, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; CHECK-NEXT: th.vsll.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.th.vsll.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv8i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv16i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv16i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv32i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv32i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv32i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv32i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv64i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv64i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv64i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv64i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv4i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.th.vsll.nxv4i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv4i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv8i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv16i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv16i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsll_mask_vx_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv32i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv32i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv32i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv32i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv2i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv2i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv2i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: 
th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv2i32( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv4i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv4i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i32( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv8i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli 
zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i32( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv16i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv16i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i32( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv1i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv1i64( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv1i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; CHECK-NEXT: th.vsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv1i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + 
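; The masked vsll.vx tests above pass operands in the order the XVBinaryAAShiftMasked
; class documents: (maskedoff, vector_in, vector_in/scalar_in, mask, vl). As a rough
; sketch only (placeholder value names; scalable types spelled out the way the nxv1i64
; variant implies), such a call reads:
;
;   %r = call <vscale x 1 x i64> @llvm.riscv.th.vsll.mask.nxv1i64(
;            <vscale x 1 x i64> %maskedoff,   ; merge value for masked-off lanes
;            <vscale x 1 x i64> %op1,         ; vector being shifted
;            iXLen %shift,                    ; XLen scalar shift amount (vx form)
;            <vscale x 1 x i1> %mask,         ; mask, carried in v0
;            iXLen %vl)                       ; vector length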
+declare @llvm.riscv.th.vsll.nxv2i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv2i64( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv2i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; CHECK-NEXT: th.vsll.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv2i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv4i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i64( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv4i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; CHECK-NEXT: th.vsll.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsll.nxv8i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsll_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: 
th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i64( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsll.mask.nxv8i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; CHECK-NEXT: th.vsll.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +define @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_1_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_1_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vadd.vv v8, v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i8( + undef, + %0, + iXLen 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_1_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_1_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, 
m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vadd.vv v8, v9, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i8( + %0, + %1, + iXLen 1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv32i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.th.vsll.mask.nxv32i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv64i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv64i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define 
@intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv32i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv32i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv2i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv2i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv4i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype 
+; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv16i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv16i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv1i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsll.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv1i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv2i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsll.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv2i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv4i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsll.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.th.vsll.mask.nxv4i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.nxv8i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsll.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsll.mask.nxv8i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vsra.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vsra.ll new file mode 100644 index 00000000000000..55c446b95c5dcf --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vsra.ll @@ -0,0 +1,3097 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s + +declare @llvm.riscv.th.vsra.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: 
csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv16i8.nxv16i8( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv32i8.nxv32i8( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; 
CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv64i8.nxv64i8( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv4i16.nxv4i16( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + 
+declare @llvm.riscv.th.vsra.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv8i16.nxv8i16( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv16i16.nxv16i16( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; 
CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv32i16.nxv32i16( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e16, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + 
iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv2i32.nxv2i32( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv4i32.nxv4i32( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; 
CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv8i32.nxv8i32( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv16i32.nxv16i32( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: 
th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e32, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv1i64.nxv1i64( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsra.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv2i64.nxv2i64( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen); + +define 
@intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsra.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv4i64.nxv4i64( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsra.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv8i64.nxv8i64( + , + , + , + iXLen); + +define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, 
m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen); + +define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e64, m8, d1 +; CHECK-NEXT: th.vle.v v24, (a0) +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a0, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a0, a2 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; CHECK-NEXT: th.vsra.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv8i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv16i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv16i8_nxv16i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: 
th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv16i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vsra.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv32i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv32i8_nxv32i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv32i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv32i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv32i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv64i8( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv64i8_nxv64i8( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv64i8( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv64i8( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8( %0, %1, iXLen %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv64i8( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv4i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv4i16_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv4i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv8i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv8i16_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vsra.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv16i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv16i16_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv16i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv32i16( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv32i16_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv32i16( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv32i16( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr 
a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv32i16( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv2i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv2i32_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv2i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv2i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv2i32( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv4i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv4i32_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv4i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vsra.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i32( + %0, + %1, + iXLen %2, + %3, + 
iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv8i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv8i32_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i32( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv16i32( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv16i32_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i32( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv16i32( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i32( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv1i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv1i64_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; 
CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv1i64( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv1i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m1, d1 +; CHECK-NEXT: th.vsra.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv1i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv2i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv2i64_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv2i64( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv2i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m2, d1 +; CHECK-NEXT: th.vsra.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv2i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv4i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv4i64_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i64( + undef, + %0, + iXLen %1, + 
iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv4i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m4, d1 +; CHECK-NEXT: th.vsra.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.th.vsra.nxv8i64( + , + , + iXLen, + iXLen); + +define @intrinsic_vsra_vx_nxv8i64_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i64( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsra.mask.nxv8i64( + , + , + iXLen, + , + iXLen); + +define @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: csrr a2, vl +; CHECK-NEXT: csrr a3, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a2, a3 +; CHECK-NEXT: th.vsetvli zero, a1, e64, m8, d1 +; CHECK-NEXT: th.vsra.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i64( + %0, + %1, + iXLen %2, + %3, + iXLen %4) + + ret %a +} + +define @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; 
CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv32i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv32i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret 
%a +} + +define @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv64i8( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv64i8( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; 
CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv32i16( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv32i16( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv2i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv2i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, 
a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv16i32( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv16i32( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv1i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; 
CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsra.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv1i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv2i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsra.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv2i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv4i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsra.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv4i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define 
@intrinsic_vsra_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsra_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.nxv8i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsra.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsra.mask.nxv8i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vsrl.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vsrl.ll new file mode 100644 index 00000000000000..0a15bb8fc4b8cc --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vsrl.ll @@ -0,0 +1,3097 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ +; RUN: -verify-machineinstrs | FileCheck %s + +declare @llvm.riscv.th.vsrl.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 +; CHECK-NEXT: th.vsrl.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vsrl.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen); + +define @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; 
CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    th.vsetvli zero, a0, e8, m1, d1
+; CHECK-NEXT:    th.vsrl.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.th.vsrl.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.th.vsrl.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    th.vsetvli zero, a0, e8, m2, d1
+; CHECK-NEXT:    th.vsrl.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.th.vsrl.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.th.vsrl.mask.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    th.vsetvli zero, a0, e8, m2, d1
+; CHECK-NEXT:    th.vsrl.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.th.vsrl.mask.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+; The same unmasked and masked .vv tests are repeated for <vscale x 32 x i8>,
+; <vscale x 64 x i8>, <vscale x 4 x i16>, <vscale x 8 x i16>, <vscale x 16 x i16>,
+; <vscale x 32 x i16>, <vscale x 2 x i32>, <vscale x 4 x i32>, <vscale x 8 x i32>,
+; <vscale x 16 x i32>, <vscale x 1 x i64>, <vscale x 2 x i64>, <vscale x 4 x i64>
+; and <vscale x 8 x i64>.  For the LMUL=8 masked cases the third vector argument
+; is passed indirectly and reloaded with th.vle.v v24, (a0) before the masked
+; th.vsrl.vv.
+
+declare <vscale x 8 x i8> @llvm.riscv.th.vsrl.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m1, d1
+; CHECK-NEXT:    th.vsrl.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.th.vsrl.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.th.vsrl.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    csrr a2, vl
+; CHECK-NEXT:    csrr a3, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a2, a3
+; CHECK-NEXT:    th.vsetvli zero, a1, e8, m1, d1
+; CHECK-NEXT:    th.vsrl.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.th.vsrl.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+; The same unmasked and masked .vx tests are repeated for the remaining integer
+; types from <vscale x 16 x i8> up to <vscale x 8 x i64>.
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    th.vsetvli zero, a0, e8, m1, d1
+; CHECK-NEXT:    th.vsrl.vi v8, v8, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.th.vsrl.nxv8i8(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    th.vsetvli zero, a0, e8, m1, d1
+; CHECK-NEXT:    th.vsrl.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.th.vsrl.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    iXLen 9,
+    <vscale x 8 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+; The same unmasked and masked .vi tests (shift amount 9) are repeated for the
+; remaining integer types up to <vscale x 16 x i32>.
+
+define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    th.vsetvli zero, a0, e64, m1, d1
+; CHECK-NEXT:    th.vsrl.vi v8, v8, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.th.vsrl.nxv1i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+; CHECK-NEXT:    csrr a1, vl
+; CHECK-NEXT:    csrr a2, vtype
+; CHECK-NEXT:    th.vsetvli zero, zero, e8, m1, d1
+; CHECK-NEXT:    th.vsetvl zero, a1, a2
+;
CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vsrl.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.mask.nxv1i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsrl.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.nxv2i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m2, d1 +; CHECK-NEXT: th.vsrl.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.mask.nxv2i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsrl.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.nxv4i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m4, d1 +; CHECK-NEXT: th.vsrl.vi v8, v12, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.mask.nxv4i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsrl.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.nxv8i64( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m8, d1 +; CHECK-NEXT: th.vsrl.vi v8, v16, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vsrl.mask.nxv8i64( + %0, + %1, + iXLen 9, + %2, + iXLen %3) + + ret %a +}