diff --git a/llvm/docs/HowToUpdateDebugInfo.rst b/llvm/docs/HowToUpdateDebugInfo.rst index fff0cdbe59c8d5..7df2a8a2582759 100644 --- a/llvm/docs/HowToUpdateDebugInfo.rst +++ b/llvm/docs/HowToUpdateDebugInfo.rst @@ -342,8 +342,8 @@ A variant of the ``debugify`` utility described in :ref:`Mutation testing for IR-level transformations` can be used for MIR-level transformations as well: much like the IR-level pass, ``mir-debugify`` inserts sequentially increasing line locations to each -``MachineInstr`` in a ``Module``. And the MIR-level ``mir-check-debugify`` is -similar to IR-level ``check-debugify`` pass. +``MachineInstr`` in a ``Module`` (although there is no equivalent MIR-level +``check-debugify`` pass). For example, here is a snippet before: @@ -403,32 +403,16 @@ and ``-start-after``. For example: $ llc -debugify-and-strip-all-safe -run-pass=... $ llc -debugify-and-strip-all-safe -O1 -If you want to check it after each pass in a pipeline, use -``-debugify-check-and-strip-all-safe``. This can also be combined with -``-start-before`` and ``-start-after``. For example: - -.. code-block:: bash - - $ llc -debugify-check-and-strip-all-safe -run-pass=... - $ llc -debugify-check-and-strip-all-safe -O1 - -To check all debug info from a test, use ``mir-check-debugify``, like: - -.. code-block:: bash - - $ llc -run-pass=mir-debugify,other-pass,mir-check-debugify - To strip out all debug info from a test, use ``mir-strip-debug``, like: .. code-block:: bash $ llc -run-pass=mir-debugify,other-pass,mir-strip-debug -It can be useful to combine ``mir-debugify``, ``mir-check-debugify`` and/or -``mir-strip-debug`` to identify backend transformations which break in -the presence of debug info. For example, to run the AArch64 backend tests -with all normal passes "sandwiched" in between MIRDebugify and -MIRStripDebugify mutation passes, run: +It can be useful to combine ``mir-debugify`` and ``mir-strip-debug`` to +identify backend transformations which break in the presence of debug info. +For example, to run the AArch64 backend tests with all normal passes +"sandwiched" in between MIRDebugify and MIRStripDebugify mutation passes, run: .. code-block:: bash diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h index 676ed2c65eb1ed..47037bac6270f6 100644 --- a/llvm/include/llvm/CodeGen/Passes.h +++ b/llvm/include/llvm/CodeGen/Passes.h @@ -482,9 +482,6 @@ namespace llvm { /// info was generated by another source such as clang. ModulePass *createStripDebugMachineModulePass(bool OnlyDebugified); - /// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp - ModulePass *createCheckDebugMachineModulePass(); - /// The pass fixups statepoint machine instruction to replace usage of /// caller saved registers with stack slots. extern char &FixupStatepointCallerSavedID; diff --git a/llvm/include/llvm/CodeGen/TargetPassConfig.h b/llvm/include/llvm/CodeGen/TargetPassConfig.h index 9b42b0756d4178..fc52452169416e 100644 --- a/llvm/include/llvm/CodeGen/TargetPassConfig.h +++ b/llvm/include/llvm/CodeGen/TargetPassConfig.h @@ -313,9 +313,6 @@ class TargetPassConfig : public ImmutablePass { /// Add a pass to remove debug info from the MIR. void addStripDebugPass(); - /// Add a pass to check synthesized debug info for MIR. - void addCheckDebugPass(); - /// Add standard passes before a pass that's about to be added. For example, /// the DebugifyMachineModulePass if it is enabled. 
void addMachinePrePasses(bool AllowDebugify = true); diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td index 30d15347206cc2..6513617a85fc5b 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -124,7 +124,7 @@ let TargetPrefix = "riscv" in { class RISCVBinaryAAXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, - llvm_anyvector_ty, llvm_anyint_ty], + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let ExtendOperand = 3; } @@ -141,7 +141,7 @@ let TargetPrefix = "riscv" in { class RISCVBinaryABXMask : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty, - llvm_anyvector_ty, llvm_anyint_ty], + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let ExtendOperand = 3; } @@ -149,7 +149,8 @@ let TargetPrefix = "riscv" in { // Input: (vector_in, vector_in/scalar_in, V0, vl) class RISCVBinaryWithV0 : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty, + [LLVMMatchType<0>, llvm_any_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let ExtendOperand = 2; @@ -158,8 +159,9 @@ let TargetPrefix = "riscv" in { // Output: (mask type output) // Input: (vector_in, vector_in/scalar_in, V0, vl) class RISCVBinaryMOutWithV0 - :Intrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_any_ty, LLVMMatchType<0>, + :Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], + [llvm_anyvector_ty, llvm_any_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let ExtendOperand = 2; @@ -168,7 +170,7 @@ let TargetPrefix = "riscv" in { // Output: (mask type output) // Input: (vector_in, vector_in/scalar_in, vl) class RISCVBinaryMOut - : Intrinsic<[llvm_anyvector_ty], + : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let ExtendOperand = 2; @@ -237,6 +239,20 @@ let TargetPrefix = "riscv" in { defm vmaxu : RISCVBinaryAAX; defm vmax : RISCVBinaryAAX; + defm vmul : RISCVBinaryAAX; + defm vmulh : RISCVBinaryAAX; + defm vmulhu : RISCVBinaryAAX; + defm vmulhsu : RISCVBinaryAAX; + + defm vdivu : RISCVBinaryAAX; + defm vdiv : RISCVBinaryAAX; + defm vremu : RISCVBinaryAAX; + defm vrem : RISCVBinaryAAX; + + defm vwmul : RISCVBinaryABX; + defm vwmulu : RISCVBinaryABX; + defm vwmulsu : RISCVBinaryABX; + defm vfadd : RISCVBinaryAAX; defm vfsub : RISCVBinaryAAX; defm vfrsub : RISCVBinaryAAX; diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index e1b3a8dd3f3a76..b8420ade06a7b5 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -112,7 +112,6 @@ void initializeCallGraphViewerPass(PassRegistry&); void initializeCallGraphWrapperPassPass(PassRegistry&); void initializeCallSiteSplittingLegacyPassPass(PassRegistry&); void initializeCalledValuePropagationLegacyPassPass(PassRegistry &); -void initializeCheckDebugMachineModulePass(PassRegistry &); void initializeCodeGenPreparePass(PassRegistry&); void initializeConstantHoistingLegacyPassPass(PassRegistry&); void initializeConstantMergeLegacyPassPass(PassRegistry&); diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt index d50349c514a39d..7dc854cbbf20a4 100644 --- a/llvm/lib/CodeGen/CMakeLists.txt +++ b/llvm/lib/CodeGen/CMakeLists.txt 
@@ -74,7 +74,6 @@ add_llvm_component_library(LLVMCodeGen MachineCombiner.cpp MachineCopyPropagation.cpp MachineCSE.cpp - MachineCheckDebugify.cpp MachineDebugify.cpp MachineDominanceFrontier.cpp MachineDominators.cpp diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp index 94925498b01cac..92a2b73f52e3eb 100644 --- a/llvm/lib/CodeGen/CodeGen.cpp +++ b/llvm/lib/CodeGen/CodeGen.cpp @@ -25,7 +25,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeBranchRelaxationPass(Registry); initializeCFGuardLongjmpPass(Registry); initializeCFIInstrInserterPass(Registry); - initializeCheckDebugMachineModulePass(Registry); initializeCodeGenPreparePass(Registry); initializeDeadMachineInstructionElimPass(Registry); initializeDebugifyMachineModulePass(Registry); diff --git a/llvm/lib/CodeGen/MachineCheckDebugify.cpp b/llvm/lib/CodeGen/MachineCheckDebugify.cpp deleted file mode 100644 index bd7f0f86294758..00000000000000 --- a/llvm/lib/CodeGen/MachineCheckDebugify.cpp +++ /dev/null @@ -1,126 +0,0 @@ -//===- MachineCheckDebugify.cpp - Check debug info ------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file This checks debug info after mir-debugify (+ pass-to-test). Currently -/// it simply checks the integrity of line info in DILocation and -/// DILocalVariable which mir-debugifiy generated before. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineModuleInfo.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/IR/DebugInfo.h" -#include "llvm/InitializePasses.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Transforms/Utils/Debugify.h" - -#define DEBUG_TYPE "mir-check-debugify" - -using namespace llvm; - -namespace { - -struct CheckDebugMachineModule : public ModulePass { - bool runOnModule(Module &M) override { - MachineModuleInfo &MMI = - getAnalysis().getMMI(); - - NamedMDNode *NMD = M.getNamedMetadata("llvm.mir.debugify"); - if (!NMD) { - errs() << "WARNING: Please run mir-debugify to generate " - "llvm.mir.debugify metadata first.\n"; - return false; - } - - auto getDebugifyOperand = [&](unsigned Idx) -> unsigned { - return mdconst::extract(NMD->getOperand(Idx)->getOperand(0)) - ->getZExtValue(); - }; - assert(NMD->getNumOperands() == 2 && - "llvm.mir.debugify should have exactly 2 operands!"); - unsigned NumLines = getDebugifyOperand(0); - unsigned NumVars = getDebugifyOperand(1); - BitVector MissingLines{NumLines, true}; - BitVector MissingVars{NumVars, true}; - - for (Function &F : M.functions()) { - MachineFunction *MF = MMI.getMachineFunction(F); - if (!MF) - continue; - for (MachineBasicBlock &MBB : *MF) { - // Find missing lines. - // TODO: Avoid meta instructions other than dbg_val. - for (MachineInstr &MI : MBB) { - if (MI.isDebugValue()) - continue; - const DebugLoc DL = MI.getDebugLoc(); - if (DL && DL.getLine() != 0) { - MissingLines.reset(DL.getLine() - 1); - continue; - } - - if (!DL) { - errs() << "WARNING: Instruction with empty DebugLoc in function "; - errs() << F.getName() << " --"; - MI.print(errs()); - } - } - - // Find missing variables. - // TODO: Handle DBG_INSTR_REF which is under an experimental option now. 
- for (MachineInstr &MI : MBB) { - if (!MI.isDebugValue()) - continue; - const DILocalVariable *LocalVar = MI.getDebugVariable(); - unsigned Var = ~0U; - - (void)to_integer(LocalVar->getName(), Var, 10); - assert(Var <= NumVars && "Unexpected name for DILocalVariable"); - MissingVars.reset(Var - 1); - } - } - } - - bool Fail = false; - for (unsigned Idx : MissingLines.set_bits()) { - errs() << "WARNING: Missing line " << Idx + 1 << "\n"; - Fail = true; - } - - for (unsigned Idx : MissingVars.set_bits()) { - errs() << "WARNING: Missing variable " << Idx + 1 << "\n"; - Fail = true; - } - errs() << "Machine IR debug info check: "; - errs() << (Fail ? "FAIL" : "PASS") << "\n"; - - return false; - } - - CheckDebugMachineModule() : ModulePass(ID) {} - - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired(); - AU.addPreserved(); - AU.setPreservesCFG(); - } - - static char ID; // Pass identification. -}; -char CheckDebugMachineModule::ID = 0; - -} // end anonymous namespace - -INITIALIZE_PASS_BEGIN(CheckDebugMachineModule, DEBUG_TYPE, - "Machine Check Debug Module", false, false) -INITIALIZE_PASS_END(CheckDebugMachineModule, DEBUG_TYPE, - "Machine Check Debug Module", false, false) - -ModulePass *llvm::createCheckDebugMachineModulePass() { - return new CheckDebugMachineModule(); -} diff --git a/llvm/lib/CodeGen/MachineDebugify.cpp b/llvm/lib/CodeGen/MachineDebugify.cpp index 599a8184759265..bf57ec0e8c2843 100644 --- a/llvm/lib/CodeGen/MachineDebugify.cpp +++ b/llvm/lib/CodeGen/MachineDebugify.cpp @@ -14,7 +14,6 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -90,11 +89,10 @@ bool applyDebugifyMetadataToMachineFunction(MachineModuleInfo &MMI, // Do this by introducing debug uses of each register definition. If that is // not possible (e.g. we have a phi or a meta instruction), emit a constant. uint64_t NextImm = 0; - SmallSet VarSet; const MCInstrDesc &DbgValDesc = TII.get(TargetOpcode::DBG_VALUE); for (MachineBasicBlock &MBB : MF) { MachineBasicBlock::iterator FirstNonPHIIt = MBB.getFirstNonPHI(); - for (auto I = MBB.begin(), E = MBB.end(); I != E;) { + for (auto I = MBB.begin(), E = MBB.end(); I != E; ) { MachineInstr &MI = *I; ++I; @@ -115,7 +113,6 @@ bool applyDebugifyMetadataToMachineFunction(MachineModuleInfo &MMI, Line = EarliestDVI->getDebugLoc().getLine(); DILocalVariable *LocalVar = Line2Var[Line]; assert(LocalVar && "No variable for current line?"); - VarSet.insert(LocalVar); // Emit DBG_VALUEs for register definitions. SmallVector RegDefs; @@ -135,33 +132,6 @@ bool applyDebugifyMetadataToMachineFunction(MachineModuleInfo &MMI, } } - // Here we save the number of lines and variables into "llvm.mir.debugify". - // It is useful for mir-check-debugify. - NamedMDNode *NMD = M.getNamedMetadata("llvm.mir.debugify"); - IntegerType *Int32Ty = Type::getInt32Ty(Ctx); - if (!NMD) { - NMD = M.getOrInsertNamedMetadata("llvm.mir.debugify"); - auto addDebugifyOperand = [&](unsigned N) { - NMD->addOperand(MDNode::get( - Ctx, ValueAsMetadata::getConstant(ConstantInt::get(Int32Ty, N)))); - }; - // Add number of lines. - addDebugifyOperand(NextLine - 1); - // Add number of variables. 
- addDebugifyOperand(VarSet.size()); - } else { - assert(NMD->getNumOperands() == 2 && - "llvm.mir.debugify should have exactly 2 operands!"); - auto setDebugifyOperand = [&](unsigned Idx, unsigned N) { - NMD->setOperand(Idx, MDNode::get(Ctx, ValueAsMetadata::getConstant( - ConstantInt::get(Int32Ty, N)))); - }; - // Set number of lines. - setDebugifyOperand(0, NextLine - 1); - // Set number of variables. - setDebugifyOperand(1, VarSet.size()); - } - return true; } diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp index 10c1ff988ac5f6..48686b9e04f83f 100644 --- a/llvm/lib/CodeGen/TargetPassConfig.cpp +++ b/llvm/lib/CodeGen/TargetPassConfig.cpp @@ -120,12 +120,6 @@ static cl::opt DebugifyAndStripAll( "Debugify MIR before and Strip debug after " "each pass except those known to be unsafe when debug info is present"), cl::ZeroOrMore); -static cl::opt DebugifyCheckAndStripAll( - "debugify-check-and-strip-all-safe", cl::Hidden, - cl::desc( - "Debugify MIR before, by checking and stripping the debug info after, " - "each pass except those known to be unsafe when debug info is present"), - cl::ZeroOrMore); enum RunOutliner { AlwaysOutline, NeverOutline, TargetDefault }; // Enable or disable the MachineOutliner. static cl::opt EnableMachineOutliner( @@ -631,26 +625,15 @@ void TargetPassConfig::addStripDebugPass() { PM->add(createStripDebugMachineModulePass(/*OnlyDebugified=*/true)); } -void TargetPassConfig::addCheckDebugPass() { - PM->add(createCheckDebugMachineModulePass()); -} - void TargetPassConfig::addMachinePrePasses(bool AllowDebugify) { - if (AllowDebugify && DebugifyIsSafe && - (DebugifyAndStripAll == cl::BOU_TRUE || - DebugifyCheckAndStripAll == cl::BOU_TRUE)) + if (AllowDebugify && DebugifyAndStripAll == cl::BOU_TRUE && DebugifyIsSafe) addDebugifyPass(); } void TargetPassConfig::addMachinePostPasses(const std::string &Banner, bool AllowVerify, bool AllowStrip) { - if (DebugifyIsSafe) { - if (DebugifyCheckAndStripAll == cl::BOU_TRUE) { - addCheckDebugPass(); - addStripDebugPass(); - } else if (DebugifyAndStripAll == cl::BOU_TRUE) - addStripDebugPass(); - } + if (DebugifyAndStripAll == cl::BOU_TRUE && DebugifyIsSafe) + addStripDebugPass(); if (AllowVerify) addVerifyPass(Banner); } diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td index 7a7b3bb0ad3201..56339d7df52d8d 100644 --- a/llvm/lib/Target/RISCV/RISCV.td +++ b/llvm/lib/Target/RISCV/RISCV.td @@ -160,8 +160,7 @@ def HasRVCHints : Predicate<"Subtarget->enableRVCHintInstrs()">, def FeatureStdExtV : SubtargetFeature<"experimental-v", "HasStdExtV", "true", - "'V' (Vector Instructions)", - [FeatureStdExtF]>; + "'V' (Vector Instructions)">; def HasStdExtV : Predicate<"Subtarget->hasStdExtV()">, AssemblerPredicate<(all_of FeatureStdExtV), "'V' (Vector Instructions)">; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index c0202e3f19e0d3..aeb6b0623862e4 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -124,23 +124,29 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, addRegisterClass(RISCVVMVTs::vint64m4_t, &RISCV::VRM4RegClass); addRegisterClass(RISCVVMVTs::vint64m8_t, &RISCV::VRM8RegClass); - addRegisterClass(RISCVVMVTs::vfloat16mf4_t, &RISCV::VRRegClass); - addRegisterClass(RISCVVMVTs::vfloat16mf2_t, &RISCV::VRRegClass); - addRegisterClass(RISCVVMVTs::vfloat16m1_t, &RISCV::VRRegClass); - addRegisterClass(RISCVVMVTs::vfloat16m2_t, 
&RISCV::VRM2RegClass); - addRegisterClass(RISCVVMVTs::vfloat16m4_t, &RISCV::VRM4RegClass); - addRegisterClass(RISCVVMVTs::vfloat16m8_t, &RISCV::VRM8RegClass); - - addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass); - addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass); - addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass); - addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass); - addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass); - - addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass); - addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass); - addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass); - addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass); + if (Subtarget.hasStdExtZfh()) { + addRegisterClass(RISCVVMVTs::vfloat16mf4_t, &RISCV::VRRegClass); + addRegisterClass(RISCVVMVTs::vfloat16mf2_t, &RISCV::VRRegClass); + addRegisterClass(RISCVVMVTs::vfloat16m1_t, &RISCV::VRRegClass); + addRegisterClass(RISCVVMVTs::vfloat16m2_t, &RISCV::VRM2RegClass); + addRegisterClass(RISCVVMVTs::vfloat16m4_t, &RISCV::VRM4RegClass); + addRegisterClass(RISCVVMVTs::vfloat16m8_t, &RISCV::VRM8RegClass); + } + + if (Subtarget.hasStdExtF()) { + addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass); + addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass); + addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass); + addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass); + addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass); + } + + if (Subtarget.hasStdExtD()) { + addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass); + addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass); + addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass); + addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass); + } } // Compute derived properties from the register classes. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td index 220aa9acc771ff..3164326e951179 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td @@ -736,7 +736,9 @@ let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in { defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">; defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">; } // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow +} // Predicates = [HasStdExtV] +let Predicates = [HasStdExtV, HasStdExtF] in { // Vector Single-Width Floating-Point Add/Subtract Instructions defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>; defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>; @@ -862,7 +864,9 @@ defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>; defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>; defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NarrowCvt +} // Predicates = [HasStdExtV, HasStdExtF] +let Predicates = [HasStdExtV] in { // Vector Single-Width Integer Reduction Instructions let RVVConstraint = NoConstraint in { defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>; @@ -884,7 +888,9 @@ let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>; defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint +} // Predicates = [HasStdExtV] +let Predicates = [HasStdExtV, HasStdExtF] in { // Vector Single-Width Floating-Point Reduction Instructions let RVVConstraint = NoConstraint in { defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>; @@ -902,7 +908,9 @@ let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>; defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint +} // Predicates = [HasStdExtV, HasStdExtF] +let Predicates = [HasStdExtV] in { // Vector Mask-Register Logical Instructions defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">; defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">; @@ -964,7 +972,9 @@ def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd), } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 +} // Predicates = [HasStdExtV] +let Predicates = [HasStdExtV, HasStdExtF] in { let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 in { // Floating-Point Scalar Move Instructions def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd), @@ -973,7 +983,9 @@ def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd), (ins FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 +} // Predicates = [HasStdExtV, HasStdExtF] +let Predicates = [HasStdExtV] in { // Vector Slide Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>; @@ -982,11 +994,18 @@ defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>; let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>; -defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>; } // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>; +} // Predicates = [HasStdExtV] + +let Predicates = [HasStdExtV, HasStdExtF] in { +let 
Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { +defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>; +} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp defm VFSLIDE1DOWN_V : VALU_FV_F<"vfslide1down", 0b001111>; +} // Predicates = [HasStdExtV, HasStdExtF] +let Predicates = [HasStdExtV] in { // Vector Register Gather Instruction let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in { defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index d3282a953652ab..4d31f1217a3bb2 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -1140,6 +1140,29 @@ defm PseudoVMIN : VPseudoBinaryV_VV_VX; defm PseudoVMAXU : VPseudoBinaryV_VV_VX; defm PseudoVMAX : VPseudoBinaryV_VV_VX; +//===----------------------------------------------------------------------===// +// 12.10. Vector Single-Width Integer Multiply Instructions +//===----------------------------------------------------------------------===// +defm PseudoVMUL : VPseudoBinaryV_VV_VX; +defm PseudoVMULH : VPseudoBinaryV_VV_VX; +defm PseudoVMULHU : VPseudoBinaryV_VV_VX; +defm PseudoVMULHSU : VPseudoBinaryV_VV_VX; + +//===----------------------------------------------------------------------===// +// 12.11. Vector Integer Divide Instructions +//===----------------------------------------------------------------------===// +defm PseudoVDIVU : VPseudoBinaryV_VV_VX; +defm PseudoVDIV : VPseudoBinaryV_VV_VX; +defm PseudoVREMU : VPseudoBinaryV_VV_VX; +defm PseudoVREM : VPseudoBinaryV_VV_VX; + +//===----------------------------------------------------------------------===// +// 12.12. Vector Widening Integer Multiply Instructions +//===----------------------------------------------------------------------===// +defm PseudoVWMUL : VPseudoBinaryW_VV_VX; +defm PseudoVWMULU : VPseudoBinaryW_VV_VX; +defm PseudoVWMULSU : VPseudoBinaryW_VV_VX; + } // Predicates = [HasStdExtV] let Predicates = [HasStdExtV, HasStdExtF] in { @@ -1242,6 +1265,29 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vmin", "PseudoVMIN", AllIntegerVectors>; defm "" : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>; defm "" : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>; +//===----------------------------------------------------------------------===// +// 12.10. Vector Single-Width Integer Multiply Instructions +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryV_VV_VX<"int_riscv_vmul", "PseudoVMUL", AllIntegerVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vmulh", "PseudoVMULH", AllIntegerVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vmulhu", "PseudoVMULHU", AllIntegerVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vmulhsu", "PseudoVMULHSU", AllIntegerVectors>; + +//===----------------------------------------------------------------------===// +// 12.11. 
Vector Integer Divide Instructions +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryV_VV_VX<"int_riscv_vdivu", "PseudoVDIVU", AllIntegerVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vdiv", "PseudoVDIV", AllIntegerVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vremu", "PseudoVREMU", AllIntegerVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>; + +//===----------------------------------------------------------------------===// +// 12.12. Vector Widening Integer Multiply Instructions +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL">; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU">; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU">; + } // Predicates = [HasStdExtV] let Predicates = [HasStdExtV, HasStdExtF] in { diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir b/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir index 1ba8b0043b9235..362f4511ca0d5a 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir @@ -13,17 +13,17 @@ body: | liveins: $x0 ; CHECK-LABEL: name: fconstant_to_constant_s32 ; CHECK: liveins: $x0 - ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0, debug-location !11 - ; CHECK: DBG_VALUE [[COPY]](p0), $noreg, !9, !DIExpression(), debug-location !11 - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FA99999A0000000, debug-location !DILocation(line: 2, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[C]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 2, column: 1, scope: !6) - ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524, debug-location !DILocation(line: 3, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[C1]](s64), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !6) - ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64), debug-location !DILocation(line: 4, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[PTR_ADD]](p0), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !6) - ; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0), debug-location !DILocation(line: 5, column: 1, scope: !6) :: (store 4) - ; CHECK: DBG_VALUE 0, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !6) - ; CHECK: RET_ReallyLR debug-location !DILocation(line: 6, column: 1, scope: !6) + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0, debug-location !10 + ; CHECK: DBG_VALUE [[COPY]](p0), $noreg, !8, !DIExpression(), debug-location !10 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FA99999A0000000, debug-location !DILocation(line: 2, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[C]](s32), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 2, column: 1, scope: !5) + ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524, debug-location !DILocation(line: 3, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[C1]](s64), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !5) + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64), debug-location !DILocation(line: 4, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[PTR_ADD]](p0), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !5) + ; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0), debug-location !DILocation(line: 5, 
column: 1, scope: !5) :: (store 4) + ; CHECK: DBG_VALUE 0, $noreg, !8, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !5) + ; CHECK: RET_ReallyLR debug-location !DILocation(line: 6, column: 1, scope: !5) %0:_(p0) = COPY $x0 %3:_(s32) = G_FCONSTANT float 0x3FA99999A0000000 %1:_(s64) = G_CONSTANT i64 524 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir b/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir index d37033aec6696b..7d85afeb3a2e87 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir @@ -36,41 +36,41 @@ body: | ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; CHECK: liveins: $w0 - ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0, debug-location !11 - ; CHECK: DBG_VALUE [[COPY]](s32), $noreg, !9, !DIExpression(), debug-location !11 - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0, debug-location !DILocation(line: 2, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[C]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 2, column: 1, scope: !6) - ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1, debug-location !DILocation(line: 3, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[C1]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !6) - ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2, debug-location !DILocation(line: 4, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[C2]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !6) - ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]], debug-location !DILocation(line: 5, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[ICMP]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !6) - ; CHECK: G_BRCOND [[ICMP]](s1), %bb.1, debug-location !DILocation(line: 6, column: 1, scope: !6) - ; CHECK: G_BR %bb.2, debug-location !DILocation(line: 7, column: 1, scope: !6) + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0, debug-location !10 + ; CHECK: DBG_VALUE [[COPY]](s32), $noreg, !8, !DIExpression(), debug-location !10 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0, debug-location !DILocation(line: 2, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[C]](s32), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 2, column: 1, scope: !5) + ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1, debug-location !DILocation(line: 3, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[C1]](s32), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !5) + ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2, debug-location !DILocation(line: 4, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[C2]](s32), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !5) + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]], debug-location !DILocation(line: 5, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[ICMP]](s1), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !5) + ; CHECK: G_BRCOND [[ICMP]](s1), %bb.1, debug-location !DILocation(line: 6, column: 1, scope: !5) + ; CHECK: G_BR %bb.2, debug-location !DILocation(line: 7, column: 1, scope: !5) ; CHECK: bb.1: ; CHECK: successors: %bb.3(0x80000000) - ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]], debug-location !DILocation(line: 8, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[ADD]](s32), $noreg, !9, 
!DIExpression(), debug-location !DILocation(line: 8, column: 1, scope: !6) - ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ADD]](s32), debug-location !DILocation(line: 9, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[TRUNC]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 9, column: 1, scope: !6) - ; CHECK: G_BR %bb.3, debug-location !DILocation(line: 10, column: 1, scope: !6) + ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]], debug-location !DILocation(line: 8, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[ADD]](s32), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 8, column: 1, scope: !5) + ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ADD]](s32), debug-location !DILocation(line: 9, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[TRUNC]](s1), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 9, column: 1, scope: !5) + ; CHECK: G_BR %bb.3, debug-location !DILocation(line: 10, column: 1, scope: !5) ; CHECK: bb.2: ; CHECK: successors: %bb.3(0x80000000) - ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]], debug-location !DILocation(line: 11, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[ADD1]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 11, column: 1, scope: !6) - ; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ADD1]](s32), debug-location !DILocation(line: 12, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[TRUNC1]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 12, column: 1, scope: !6) + ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]], debug-location !DILocation(line: 11, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[ADD1]](s32), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 11, column: 1, scope: !5) + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ADD1]](s32), debug-location !DILocation(line: 12, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[TRUNC1]](s1), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 12, column: 1, scope: !5) ; CHECK: bb.3: - ; CHECK: [[PHI:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2, debug-location !DILocation(line: 13, column: 1, scope: !6) - ; CHECK: [[PHI1:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2, debug-location !DILocation(line: 14, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[PHI]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 13, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[PHI1]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 14, column: 1, scope: !6) - ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[PHI]](s1), debug-location !DILocation(line: 15, column: 1, scope: !6) - ; CHECK: DBG_VALUE [[ZEXT]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 15, column: 1, scope: !6) - ; CHECK: $w0 = COPY [[ZEXT]](s32), debug-location !DILocation(line: 16, column: 1, scope: !6) - ; CHECK: DBG_VALUE $w0, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 16, column: 1, scope: !6) - ; CHECK: RET_ReallyLR implicit $w0, debug-location !DILocation(line: 17, column: 1, scope: !6) + ; CHECK: [[PHI:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2, debug-location !DILocation(line: 13, column: 1, scope: !5) + ; CHECK: [[PHI1:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2, debug-location !DILocation(line: 14, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[PHI]](s1), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 13, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[PHI1]](s1), $noreg, !8, 
!DIExpression(), debug-location !DILocation(line: 14, column: 1, scope: !5) + ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[PHI]](s1), debug-location !DILocation(line: 15, column: 1, scope: !5) + ; CHECK: DBG_VALUE [[ZEXT]](s32), $noreg, !8, !DIExpression(), debug-location !DILocation(line: 15, column: 1, scope: !5) + ; CHECK: $w0 = COPY [[ZEXT]](s32), debug-location !DILocation(line: 16, column: 1, scope: !5) + ; CHECK: DBG_VALUE $w0, $noreg, !8, !DIExpression(), debug-location !DILocation(line: 16, column: 1, scope: !5) + ; CHECK: RET_ReallyLR implicit $w0, debug-location !DILocation(line: 17, column: 1, scope: !5) bb.0: successors: %bb.1(0x40000000), %bb.2(0x40000000) liveins: $w0 diff --git a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir b/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir deleted file mode 100644 index 47e78bc8a3f08c..00000000000000 --- a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables-x.mir +++ /dev/null @@ -1,79 +0,0 @@ -# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=mir-check-debugify -o - %s 2>&1 | FileCheck %s ---- | - ; ModuleID = 'check-line-and-variables.mir' - source_filename = "check-line-and-variables.c" - - @ga = dso_local local_unnamed_addr global i32 2, align 4 - - ; Function Attrs: nofree norecurse nounwind uwtable writeonly - define dso_local i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 !dbg !9 { - entry: - %add = add nsw i32 %b, %a, !dbg !15 - call void @llvm.dbg.value(metadata i32 %add, metadata !12, metadata !DIExpression()), !dbg !15 - %mul = shl nsw i32 %add, 1, !dbg !16 - call void @llvm.dbg.value(metadata i32 %mul, metadata !14, metadata !DIExpression()), !dbg !16 - store i32 %mul, i32* @ga, align 4, !dbg !17 - ret i32 %add, !dbg !18 - } - - declare void @llvm.dbg.value(metadata, metadata, metadata) #1 - - !llvm.module.flags = !{!0, !1} - !llvm.ident = !{!2} - !llvm.dbg.cu = !{!3} - !llvm.mir.debugify = !{!6, !7} - !llvm.debugify = !{!8, !7} - - !0 = !{i32 1, !"wchar_size", i32 4} - !1 = !{i32 2, !"Debug Info Version", i32 3} - !2 = !{!"clang version 12.0.0 (https://github.com/llvm/llvm-project.git c0a922b3db2d39f36c0c01776cce90cc160a7d62)"} - !3 = distinct !DICompileUnit(language: DW_LANG_C, file: !4, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !5) - !4 = !DIFile(filename: "main.mir", directory: "/") - !5 = !{} - !6 = !{i32 6} - !7 = !{i32 2} - !8 = !{i32 4} - !9 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: null, file: !4, line: 1, type: !10, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, retainedNodes: !11) - !10 = !DISubroutineType(types: !5) - !11 = !{!12, !14} - !12 = !DILocalVariable(name: "1", scope: !9, file: !4, line: 1, type: !13) - !13 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned) - !14 = !DILocalVariable(name: "2", scope: !9, file: !4, line: 2, type: !13) - !15 = !DILocation(line: 1, column: 1, scope: !9) - !16 = !DILocation(line: 2, column: 1, scope: !9) - !17 = !DILocation(line: 3, column: 1, scope: !9) - !18 = !DILocation(line: 4, column: 1, scope: !9) - -... 
---- -name: foo -body: | - bb.0.entry: - liveins: $edi, $esi - - %1:gr32 = COPY $esi, debug-location !15 - DBG_VALUE %1, $noreg, !12, !DIExpression(), debug-location !15 - ; Let it missing !14 and debug-location !16 - ; %0:gr32 = COPY $edi, debug-location !16 - ; DBG_VALUE %0, $noreg, !14, !DIExpression(), debug-location !16 - %0:gr32 = COPY $edi - %2:gr32 = nsw ADD32rr %1, %0, implicit-def dead $eflags, debug-location !17 - DBG_VALUE %2, $noreg, !12, !DIExpression(), debug-location !17 - DBG_VALUE $eflags, $noreg, !12, !DIExpression(), debug-location !17 - %3:gr32 = nsw ADD32rr %2, %2, implicit-def dead $eflags, debug-location !18 - DBG_VALUE %3, $noreg, !12, !DIExpression(), debug-location !18 - DBG_VALUE $eflags, $noreg, !12, !DIExpression(), debug-location !18 - MOV32mr $rip, 1, $noreg, @ga, $noreg, killed %3, debug-location !DILocation(line: 5, column: 1, scope: !9) :: (store 4 into @ga, !tbaa !18) - DBG_VALUE 0, $noreg, !12, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !9) - ; Let it miss Line 6: Change "!DILocation(line: 6, ..." to "!DILocation(line: 5, ..." - $eax = COPY %2, debug-location !DILocation(line: 5, column: 1, scope: !9) - DBG_VALUE $eax, $noreg, !12, !DIExpression(), debug-location !DILocation(line: 6, column: 1, scope: !9) - RET 0, $eax, debug-location !DILocation(line: 7, column: 1, scope: !9) - - ;CHECK: WARNING: Instruction with empty DebugLoc in function foo --%1:gr32 = COPY $edi - ;CHECK-NEXT: WARNING: Missing line 2 - ;CHECK-NEXT: WARNING: Missing line 6 - ;CHECK-NEXT: WARNING: Missing variable 2 - ;CHECK-NEXT: Machine IR debug info check: FAIL - -... diff --git a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.ll b/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.ll deleted file mode 100644 index 1b859907db0e3b..00000000000000 --- a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.ll +++ /dev/null @@ -1,29 +0,0 @@ -; RUN: llc -debugify-check-and-strip-all-safe -o - %s 2>&1 | FileCheck %s - -; ModuleID = 'main.c' -source_filename = "main.c" - -@ga = dso_local global i32 2, align 4 - -define dso_local i32 @foo(i32 %a, i32 %b) { -entry: - %a.addr = alloca i32, align 4 - %b.addr = alloca i32, align 4 - %c = alloca i32, align 4 - store i32 %a, i32* %a.addr, align 4 - store i32 %b, i32* %b.addr, align 4 - %0 = load i32, i32* %a.addr, align 4 - %1 = load i32, i32* %b.addr, align 4 - %add = add nsw i32 %0, %1 - store i32 %add, i32* %c, align 4 - %2 = load i32, i32* %c, align 4 - %mul = mul nsw i32 %2, 2 - store i32 %mul, i32* @ga, align 4 - %3 = load i32, i32* %c, align 4 - ret i32 %3 -} - -; Different Back-Ends may have different number of passes, here we only -; check two of them to make sure -debugify-check-and-strip-all-safe works. 
-;CHECK: Machine IR debug info check: PASS -;CHECK: Machine IR debug info check: PASS diff --git a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir b/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir deleted file mode 100644 index 1deb4fe5db09f3..00000000000000 --- a/llvm/test/CodeGen/Generic/MIRDebugify/check-line-and-variables.mir +++ /dev/null @@ -1,69 +0,0 @@ -# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=mir-debugify,dead-mi-elimination,mir-check-debugify -o - %s 2>&1 | FileCheck %s -# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=mir-debugify,mir-check-debugify -o - %s 2>&1 | FileCheck %s --check-prefix=CHECK-PASS ---- | - ; ModuleID = 'check-line-and-variables.mir' - source_filename = "check-line-and-variables.ll" - - @ga = dso_local global i32 2, align 4 - - ; Function Attrs: noinline nounwind optnone uwtable - define dso_local i32 @foo(i32 %a, i32 %b) { - entry: - %a.addr = alloca i32, align 4 - %b.addr = alloca i32, align 4 - %c = alloca i32, align 4 - store i32 %a, i32* %a.addr, align 4 - store i32 %b, i32* %b.addr, align 4 - %0 = load i32, i32* %a.addr, align 4 - %1 = load i32, i32* %b.addr, align 4 - %add = add nsw i32 %0, %1 - store i32 %add, i32* %c, align 4 - %2 = load i32, i32* %c, align 4 - %mul = mul nsw i32 %2, 2 - store i32 %mul, i32* @ga, align 4 - %3 = load i32, i32* %c, align 4 - ; dead-mi-elimination will remove %4 = ... - %4 = load i32, i32* %a.addr, align 4 - ret i32 %3 - } - -... ---- -name: foo -alignment: 16 -stack: - - { id: 0, name: a.addr, type: default, offset: 0, size: 4, alignment: 4, - stack-id: default, callee-saved-register: '', callee-saved-restored: true, - debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } - - { id: 1, name: b.addr, type: default, offset: 0, size: 4, alignment: 4, - stack-id: default, callee-saved-register: '', callee-saved-restored: true, - debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } - - { id: 2, name: c, type: default, offset: 0, size: 4, alignment: 4, - stack-id: default, callee-saved-register: '', callee-saved-restored: true, - debug-info-variable: '', debug-info-expression: '', debug-info-location: '' } -body: | - bb.0.entry: - liveins: $edi, $esi - - %2:gr32 = COPY $esi - %0:gr32 = COPY $edi - %1:gr32 = COPY killed %0 - %3:gr32 = COPY killed %2 - MOV32mr %stack.0.a.addr, 1, $noreg, 0, $noreg, %1 :: (store 4 into %ir.a.addr) - MOV32mr %stack.1.b.addr, 1, $noreg, 0, $noreg, %3 :: (store 4 into %ir.b.addr) - %14:gr32 = MOV32rm %stack.0.a.addr, 1, $noreg, 0, $noreg :: (load 4 from %ir.a.addr) - %13:gr32 = ADD32rm killed %14, %stack.1.b.addr, 1, $noreg, 0, $noreg, implicit-def $eflags :: (load 4 from %ir.b.addr) - ; dead-mi-elimination will remove %15:gr32 = ... - %15:gr32 = MOV32rm %stack.0.a.addr, 1, $noreg, 0, $noreg :: (load 4 from %ir.a.addr) - MOV32mr %stack.2.c, 1, $noreg, 0, $noreg, killed %13 :: (store 4 into %ir.c) - %9:gr32 = MOV32rm %stack.2.c, 1, $noreg, 0, $noreg :: (load 4 from %ir.c) - %8:gr32 = SHL32ri killed %9, 1, implicit-def $eflags - MOV32mr $noreg, 1, $noreg, @ga, $noreg, killed %8 :: (store 4 into @ga) - %5:gr32 = MOV32rm %stack.2.c, 1, $noreg, 0, $noreg :: (load 4 from %ir.c) - $eax = COPY %5 - RETQ implicit $eax - - ;CHECK: WARNING: Missing line 9 - ;CHECK-NEXT: Machine IR debug info check: FAIL - ;CHECK-PASS: Machine IR debug info check: PASS -... 
diff --git a/llvm/test/CodeGen/Generic/MIRDebugify/locations-and-values.mir b/llvm/test/CodeGen/Generic/MIRDebugify/locations-and-values.mir index 79dc90f017c129..84e93f9474b8f5 100644 --- a/llvm/test/CodeGen/Generic/MIRDebugify/locations-and-values.mir +++ b/llvm/test/CodeGen/Generic/MIRDebugify/locations-and-values.mir @@ -17,10 +17,10 @@ ret i32 %sub } - ; ALL: !llvm.dbg.cu = !{![[CU:[0-9]+]]} + ; ALL: !llvm.dbg.cu = !{!0} ; ALL: !llvm.debugify = ; ALL: !llvm.module.flags = !{![[VERSION:[0-9]+]]} - ; ALL: ![[CU]] = distinct !DICompileUnit( + ; ALL: !0 = distinct !DICompileUnit( ; ALL: ![[VERSION]] = !{i32 2, !"Debug Info Version", i32 3} ; VALUE: [[VAR1:![0-9]+]] = !DILocalVariable(name: "1" ; VALUE: [[VAR2:![0-9]+]] = !DILocalVariable(name: "2" @@ -44,8 +44,8 @@ body: | ; VALUE: DBG_VALUE %1(s32), $noreg, [[VAR2]], !DIExpression(), debug-location [[L2]] ; ALL: %2:_(s32) = G_CONSTANT i32 2, debug-location [[L3]] ; VALUE: DBG_VALUE %2(s32), $noreg, [[VAR1]], !DIExpression(), debug-location [[L3]] - ; ALL: %3:_(s32) = G_ADD %0, %2, debug-location !DILocation(line: 4, column: 1, scope: [[SP:![0-9]+]]) + ; ALL: %3:_(s32) = G_ADD %0, %2, debug-location !DILocation(line: 4, column: 1, scope: !6) ; VALUE: DBG_VALUE %3(s32), $noreg, [[VAR1]], !DIExpression(), debug-location !DILocation(line: 4 - ; ALL: %4:_(s32) = G_SUB %3, %1, debug-location !DILocation(line: 5, column: 1, scope: [[SP]]) + ; ALL: %4:_(s32) = G_SUB %3, %1, debug-location !DILocation(line: 5, column: 1, scope: !6) ; VALUE: DBG_VALUE %4(s32), $noreg, [[VAR1]], !DIExpression(), debug-location !DILocation(line: 5 ... diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll new file mode 100644 index 00000000000000..ba0912cba28069 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -0,0 +1,1441 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vdiv.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vdiv.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( 
%0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; 
CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i32.nxv2i32( + %0, + %1, + 
i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, 
i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) 
+ + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i32.i32( + %0, + i32 %1, + i32 
%2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { 
+entry:
+; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
new file mode 100644
index 00000000000000..f5869dea2d0d62
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8>
@llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + 
+ ret %a +} + +declare @llvm.riscv.vdiv.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, 
i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i8.i8( + %0, + 
i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + 
ret %a +} + +declare @llvm.riscv.vdiv.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdiv.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdiv.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdiv.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdiv.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call 
@llvm.riscv.vdiv.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vdiv.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
new file mode 100644
index 00000000000000..ecfbd3b254ebb3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+;
CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = 
call @llvm.riscv.vdivu.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define 
@intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + 
i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i32.i32( + , 
+ , + i32, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll new file mode 100644 index 00000000000000..a09d88f5db793e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -0,0 +1,1761 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vdivu.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} 
+ +declare @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define 
@intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 
%4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vdivu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vdivu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call 
@llvm.riscv.vdivu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i32.i32( + , + , + 
i32, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vdivu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vdivu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vdivu.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vdivu.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vdivu.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vdivu.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
index be01dc42df6479..32e9004f9fb097 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
+; RUN:   -mattr=+f -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
   <vscale x 1 x i32>*,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
index 1726b07053edc3..9f69a245d5c03a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
+; RUN:   -mattr=+d -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
   <vscale x 1 x i64>*,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
new file mode 100644
index 00000000000000..0b798e1c3a0b59
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+
i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, 
+ %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vmul.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmul.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
new file mode 100644
index 00000000000000..fe6e710b7ff95a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8>
@llvm.riscv.vmul.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i16.nxv2i16( + , + , + i64); + +define 
@intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vmul.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i16.i16( + , + i16, + i64); + +define 
@intrinsic_vmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu 
+; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vmul.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmul.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmul.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmul.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmul.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
new file mode 100644
index 00000000000000..3392b6a6f3bb67
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8>
@llvm.riscv.vmulh.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu 
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = 
call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + 
+declare @llvm.riscv.vmulh.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: 
vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i16.i16( + , + , + i16, + 
, + i32); + +define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
new file mode 100644
index 00000000000000..1749aafa270206
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1>
%3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu 
+; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmulh.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 
%4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i64.nxv2i64( + , + , + i64); + +define 
@intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulh.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define 
@intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmulh.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv1i64.i64( + , + i64, + 
i64); + +define @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulh.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulh.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulh.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulh.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulh.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll new file mode 100644 index 00000000000000..aea1de9106b458 --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8>
@llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a 
+} + +declare @llvm.riscv.vmulhsu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vx 
{{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll new file mode 100644 index 00000000000000..dfff16e90cf6f3 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll @@ -0,0 +1,1761 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases 
< %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i8>
@intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + , + , + i64); + +define 
@intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + , + , + i64); + +define 
@intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + , + , + i64); + +define 
@intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + , + , + i64); + +define 
@intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulhsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = 
call @llvm.riscv.vmulhsu.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + 
+declare @llvm.riscv.vmulhsu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv2i32.i32( + 
, + , + i32, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhsu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhsu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhsu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define 
<vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmulhsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
new file mode 100644
index 00000000000000..8997be2724c5f9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vmulhu.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + 
+declare @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( + , + , + , + , + 
i32); + +define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmulhu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vmulhu.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i32.i32( + , + i32, + i32); + +define 
<vscale x 4 x i32> @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
new file mode 100644
index 00000000000000..4561f0b390da3e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}},
{{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) 
+ + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vmulhu.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i32.nxv16i32( + , + , + i64); + +define 
@intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, 
i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmulhu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + 
%a = call @llvm.riscv.vmulhu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define 
@intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vmulhu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmulhu.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmulhu.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmulhu.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
<vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmulhu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
new file mode 100644
index 00000000000000..206bcb99a4c5f1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vrem.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i16.nxv8i16( + , + , + i32); + 
+define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} 
+ %a = call @llvm.riscv.vrem.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i16.i16( + %0, + %1, + i16 %2, 
+ %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i16.i16( + , + i16, + i32); + +define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call 
<vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
new file mode 100644
index 00000000000000..85a7a13a1619f1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll
@@ -0,0 +1,1761 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL:
intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vrem.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( + , + , + , + , + 
i64); + +define @intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vrem_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vrem.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i8.i8( + , + i8, + i64); + +define 
@intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv64i8.i8( + 
%0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vrem.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vrem.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vrem.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vrem.nxv2i64.i64( + , + i64, 
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vrem.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
new file mode 100644
index 00000000000000..e5156293f5deef
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll
@@ -0,0 +1,1441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}},
{{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i8.nxv32i8( + , + , + i32); 
+ +define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv64i8.nxv64i8( + , + , + i32); + +define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv64i8.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i16.nxv32i16( + , + , + i32); + +define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i16.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i32.nxv1i32( + , + , + i32); + +define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i32.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i32.nxv2i32( + , + , + i32); + +define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i32.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i32.nxv4i32( + , + , + i32); + +define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i32.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i32.nxv8i32( + , + , + i32); + +define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i32.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i32.nxv16i32( + , + , + i32); + +define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vremu.nxv16i32.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( + , + , + , + , + i32); + +define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv64i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vremu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i16.i16( + , + 
i16, + i32); + +define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vremu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i32.i32( + , + i32, + i32); + +define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll new file mode 100644 index 00000000000000..b946328c60fafd --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -0,0 +1,1761 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vremu.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, 
+ %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 
%2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv64i8.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i16.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i32.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i64.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} 
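;
; NOTE: the scalable-vector types in these new test files appear to have been
; stripped in extraction -- the "<vscale x N x iM>" tokens are missing from the
; declares, defines, calls, and returns above and below. The intended shape can
; be inferred from the mangled names; as a hedged sketch only (assuming the
; usual pattern of these tests: vector operands of the named type and an
; XLEN-sized vl, with a same-length i1 mask for the ".mask" variants), the
; unmasked nxv1i64 test above would read:
;
;   declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
;     <vscale x 1 x i64>,
;     <vscale x 1 x i64>,
;     i64);
;
;   define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(
;       <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
;   entry:
;   ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64
;   ; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
;   ; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
;     %a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
;       <vscale x 1 x i64> %0,
;       <vscale x 1 x i64> %1,
;       i64 %2)
;
;     ret <vscale x 1 x i64> %a
;   }
;
; The masked variant declared next presumably takes a merge operand of the
; result type, the two sources, a <vscale x 1 x i1> mask, and the vl, and its
; vremu.vv CHECK line carries the ", v0.t" suffix, matching the argument counts
; visible in the stripped listing.
;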
+ +declare @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i64.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i64.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i64.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vremu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, 
i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vremu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i16.i16( + , + i16, + i64); + +define 
@intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vremu_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} 
+ %a = call @llvm.riscv.vremu.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vremu.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vremu.nxv8i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vremu.mask.nxv8i64.i64( + , + , + i64, + 
, + i64); + +define @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vremu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vremu.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll index f9d8f9557c54a7..1c13fb069f763b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll @@ -1,4 +1,5 @@ -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+f -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare void @llvm.riscv.vse.nxv1i32( , diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll index e49eed85d5b01a..9e74dd07a38f21 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll @@ -1,4 +1,5 @@ -; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \ +; RUN: -mattr=+d -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s declare void @llvm.riscv.vse.nxv1i64( , diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll new file mode 100644 index 00000000000000..57f516d9ef1543 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a 
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 
%4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , + i16, + i32); + 
+define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll new file mode 100644 index 00000000000000..d361814644ccfd --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmul_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define 
@intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define 
@intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmul_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmul_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmul.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll new file mode 100644 index 00000000000000..34c9be773157ad --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + 
+define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + %0, 
+ %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll new file mode 100644 index 00000000000000..089f4af5512c78 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, 
%1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 
%2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define 
@intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + 
+declare @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulsu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll new file mode 100644 index 00000000000000..3d8954401e8f44 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; 
CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call 
@llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = 
call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll new file mode 100644 index 00000000000000..3d9bff312eb137 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a 
= call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; 
CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmulu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv32i16_nxv32i8_i8 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwmulu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwmulu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/MC/RISCV/rvv/convert.s b/llvm/test/MC/RISCV/rvv/convert.s index bc818523e3e9c5..0fadd888b8c278 100644 --- a/llvm/test/MC/RISCV/rvv/convert.s +++ b/llvm/test/MC/RISCV/rvv/convert.s @@ -1,261 +1,264 @@ # RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST # RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \ # RUN: | FileCheck %s --check-prefix=CHECK-ERROR # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ -# RUN: | llvm-objdump -d --mattr=+experimental-v - \ +# RUN: --mattr=+f \ +# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \ # RUN: | FileCheck %s --check-prefix=CHECK-INST # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN vfcvt.xu.f.v v8, v4, v0.t # CHECK-INST: vfcvt.xu.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x40,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 40 48 vfcvt.xu.f.v v8, v4 # CHECK-INST: vfcvt.xu.f.v v8, v4 # CHECK-ENCODING: [0x57,0x14,0x40,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 40 4a vfcvt.x.f.v v8, v4, v0.t # CHECK-INST: vfcvt.x.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x40,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 40 48 vfcvt.x.f.v v8, v4 # CHECK-INST: vfcvt.x.f.v v8, v4 # CHECK-ENCODING: [0x57,0x94,0x40,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 40 4a vfcvt.f.xu.v v8, v4, v0.t # CHECK-INST: vfcvt.f.xu.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x41,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 41 48 vfcvt.f.xu.v v8, v4 # CHECK-INST: vfcvt.f.xu.v v8, v4 # CHECK-ENCODING: 
[0x57,0x14,0x41,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 41 4a vfcvt.f.x.v v8, v4, v0.t # CHECK-INST: vfcvt.f.x.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x41,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 41 48 vfcvt.f.x.v v8, v4 # CHECK-INST: vfcvt.f.x.v v8, v4 # CHECK-ENCODING: [0x57,0x94,0x41,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 41 4a vfcvt.rtz.xu.f.v v8, v4, v0.t # CHECK-INST: vfcvt.rtz.xu.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x43,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 43 48 vfcvt.rtz.xu.f.v v8, v4 # CHECK-INST: vfcvt.rtz.xu.f.v v8, v4 # CHECK-ENCODING: [0x57,0x14,0x43,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 43 4a vfcvt.rtz.x.f.v v8, v4, v0.t # CHECK-INST: vfcvt.rtz.x.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x43,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 43 48 vfcvt.rtz.x.f.v v8, v4 # CHECK-INST: vfcvt.rtz.x.f.v v8, v4 # CHECK-ENCODING: [0x57,0x94,0x43,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 43 4a vfwcvt.xu.f.v v8, v4, v0.t # CHECK-INST: vfwcvt.xu.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x44,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 44 48 vfwcvt.xu.f.v v8, v4 # CHECK-INST: vfwcvt.xu.f.v v8, v4 # CHECK-ENCODING: [0x57,0x14,0x44,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 44 4a vfwcvt.x.f.v v8, v4, v0.t # CHECK-INST: vfwcvt.x.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x44,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 44 48 vfwcvt.x.f.v v8, v4 # CHECK-INST: vfwcvt.x.f.v v8, v4 # CHECK-ENCODING: [0x57,0x94,0x44,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 44 4a vfwcvt.f.xu.v v8, v4, v0.t # CHECK-INST: vfwcvt.f.xu.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x45,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 45 48 vfwcvt.f.xu.v v8, v4 # CHECK-INST: vfwcvt.f.xu.v v8, v4 # CHECK-ENCODING: [0x57,0x14,0x45,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 45 4a vfwcvt.f.x.v v8, v4, v0.t # CHECK-INST: vfwcvt.f.x.v v8, v4, 
v0.t # CHECK-ENCODING: [0x57,0x94,0x45,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 45 48 vfwcvt.f.x.v v8, v4 # CHECK-INST: vfwcvt.f.x.v v8, v4 # CHECK-ENCODING: [0x57,0x94,0x45,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 45 4a vfwcvt.f.f.v v8, v4, v0.t # CHECK-INST: vfwcvt.f.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x46,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 46 48 vfwcvt.f.f.v v8, v4 # CHECK-INST: vfwcvt.f.f.v v8, v4 # CHECK-ENCODING: [0x57,0x14,0x46,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 46 4a vfwcvt.rtz.xu.f.v v8, v4, v0.t # CHECK-INST: vfwcvt.rtz.xu.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x47,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 47 48 vfwcvt.rtz.xu.f.v v8, v4 # CHECK-INST: vfwcvt.rtz.xu.f.v v8, v4 # CHECK-ENCODING: [0x57,0x14,0x47,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 47 4a vfwcvt.rtz.x.f.v v8, v4, v0.t # CHECK-INST: vfwcvt.rtz.x.f.v v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x47,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 47 48 vfwcvt.rtz.x.f.v v8, v4 # CHECK-INST: vfwcvt.rtz.x.f.v v8, v4 # CHECK-ENCODING: [0x57,0x94,0x47,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 47 4a vfncvt.xu.f.w v8, v4, v0.t # CHECK-INST: vfncvt.xu.f.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x48,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 48 48 vfncvt.xu.f.w v8, v4 # CHECK-INST: vfncvt.xu.f.w v8, v4 # CHECK-ENCODING: [0x57,0x14,0x48,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 48 4a vfncvt.x.f.w v8, v4, v0.t # CHECK-INST: vfncvt.x.f.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x48,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 48 48 vfncvt.x.f.w v8, v4 # CHECK-INST: vfncvt.x.f.w v8, v4 # CHECK-ENCODING: [0x57,0x94,0x48,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 48 4a vfncvt.f.xu.w v8, v4, v0.t # CHECK-INST: vfncvt.f.xu.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x49,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 49 48 vfncvt.f.xu.w v8, v4 # 
CHECK-INST: vfncvt.f.xu.w v8, v4 # CHECK-ENCODING: [0x57,0x14,0x49,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 49 4a vfncvt.f.x.w v8, v4, v0.t # CHECK-INST: vfncvt.f.x.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x49,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 49 48 vfncvt.f.x.w v8, v4 # CHECK-INST: vfncvt.f.x.w v8, v4 # CHECK-ENCODING: [0x57,0x94,0x49,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 49 4a vfncvt.f.f.w v8, v4, v0.t # CHECK-INST: vfncvt.f.f.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 48 vfncvt.f.f.w v8, v4 # CHECK-INST: vfncvt.f.f.w v8, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 4a vfncvt.rod.f.f.w v8, v4, v0.t # CHECK-INST: vfncvt.rod.f.f.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x4a,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 4a 48 vfncvt.rod.f.f.w v8, v4 # CHECK-INST: vfncvt.rod.f.f.w v8, v4 # CHECK-ENCODING: [0x57,0x94,0x4a,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 4a 4a vfncvt.rtz.xu.f.w v8, v4, v0.t # CHECK-INST: vfncvt.rtz.xu.f.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4b,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4b 48 vfncvt.rtz.xu.f.w v8, v4 # CHECK-INST: vfncvt.rtz.xu.f.w v8, v4 # CHECK-ENCODING: [0x57,0x14,0x4b,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4b 4a vfncvt.rtz.x.f.w v8, v4, v0.t # CHECK-INST: vfncvt.rtz.x.f.w v8, v4, v0.t # CHECK-ENCODING: [0x57,0x94,0x4b,0x48] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 4b 48 vfncvt.rtz.x.f.w v8, v4 # CHECK-INST: vfncvt.rtz.x.f.w v8, v4 # CHECK-ENCODING: [0x57,0x94,0x4b,0x4a] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 94 4b 4a diff --git a/llvm/test/MC/RISCV/rvv/fadd.s b/llvm/test/MC/RISCV/rvv/fadd.s index 1c648c5a7634db..7feddd25d061cf 100644 --- a/llvm/test/MC/RISCV/rvv/fadd.s +++ b/llvm/test/MC/RISCV/rvv/fadd.s @@ -1,81 +1,84 @@ # RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST # RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \ # RUN: | FileCheck %s --check-prefix=CHECK-ERROR # RUN: llvm-mc -triple=riscv64 -filetype=obj 
--mattr=+experimental-v %s \ -# RUN: | llvm-objdump -d --mattr=+experimental-v - \ +# RUN: --mattr=+f \ +# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \ # RUN: | FileCheck %s --check-prefix=CHECK-INST # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN vfadd.vv v8, v4, v20, v0.t # CHECK-INST: vfadd.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x00] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 00 vfadd.vv v8, v4, v20 # CHECK-INST: vfadd.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x02] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 02 vfadd.vf v8, v4, fa0, v0.t # CHECK-INST: vfadd.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x00] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 00 vfadd.vf v8, v4, fa0 # CHECK-INST: vfadd.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x02] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 02 vfwadd.vv v8, v4, v20, v0.t # CHECK-INST: vfwadd.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xc0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a c0 vfwadd.vv v8, v4, v20 # CHECK-INST: vfwadd.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0xc2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a c2 vfwadd.vf v8, v4, fa0, v0.t # CHECK-INST: vfwadd.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xc0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 c0 vfwadd.vf v8, v4, fa0 # CHECK-INST: vfwadd.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0xc2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 c2 vfwadd.wv v8, v4, v20, v0.t # CHECK-INST: vfwadd.wv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xd0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a d0 vfwadd.wv v8, v4, v20 # CHECK-INST: vfwadd.wv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0xd2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a d2 vfwadd.wf v8, v4, fa0, v0.t # CHECK-INST: vfwadd.wf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xd0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 d0 vfwadd.wf v8, v4, fa0 # CHECK-INST: vfwadd.wf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0xd2] -# CHECK-ERROR: instruction requires 
the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 d2 diff --git a/llvm/test/MC/RISCV/rvv/fcompare.s b/llvm/test/MC/RISCV/rvv/fcompare.s index 4bb4e2d966f286..ada67eee08a04e 100644 --- a/llvm/test/MC/RISCV/rvv/fcompare.s +++ b/llvm/test/MC/RISCV/rvv/fcompare.s @@ -1,159 +1,162 @@ # RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST # RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \ # RUN: | FileCheck %s --check-prefix=CHECK-ERROR # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ -# RUN: | llvm-objdump -d --mattr=+experimental-v - \ +# RUN: --mattr=+f \ +# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \ # RUN: | FileCheck %s --check-prefix=CHECK-INST # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN vmfeq.vv v8, v4, v20, v0.t # CHECK-INST: vmfeq.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x60] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 60 vmfeq.vv v8, v4, v20 # CHECK-INST: vmfeq.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x62] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 62 vmfeq.vf v8, v4, fa0, v0.t # CHECK-INST: vmfeq.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x60] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 60 vmfeq.vf v8, v4, fa0 # CHECK-INST: vmfeq.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x62] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 62 vmfne.vv v8, v4, v20, v0.t # CHECK-INST: vmfne.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x70] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 70 vmfne.vv v8, v4, v20 # CHECK-INST: vmfne.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x72] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 72 vmfne.vf v8, v4, fa0, v0.t # CHECK-INST: vmfne.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x70] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 70 vmfne.vf v8, v4, fa0 # CHECK-INST: vmfne.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x72] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 72 vmflt.vv v8, v4, v20, v0.t # CHECK-INST: vmflt.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x6c] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 6c vmflt.vv v8, v4, v20 # 
CHECK-INST: vmflt.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x6e] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 6e vmflt.vf v8, v4, fa0, v0.t # CHECK-INST: vmflt.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x6c] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 6c vmflt.vf v8, v4, fa0 # CHECK-INST: vmflt.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x6e] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 6e vmfle.vv v8, v4, v20, v0.t # CHECK-INST: vmfle.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x64] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 64 vmfle.vv v8, v4, v20 # CHECK-INST: vmfle.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x66] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 66 vmfle.vf v8, v4, fa0, v0.t # CHECK-INST: vmfle.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x64] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 64 vmfle.vf v8, v4, fa0 # CHECK-INST: vmfle.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x66] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 66 vmfgt.vf v8, v4, fa0, v0.t # CHECK-INST: vmfgt.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x74] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 74 vmfgt.vf v8, v4, fa0 # CHECK-INST: vmfgt.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x76] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 76 vmfge.vf v8, v4, fa0, v0.t # CHECK-INST: vmfge.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x7c] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 7c vmfge.vf v8, v4, fa0 # CHECK-INST: vmfge.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x7e] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 7e vmfgt.vv v8, v20, v4, v0.t # CHECK-INST: vmflt.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x6c] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 6c vmfgt.vv v8, v20, v4 # CHECK-INST: vmflt.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x6e] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 6e vmfge.vv v8, v20, v4, 
v0.t # CHECK-INST: vmfle.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x64] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 64 vmfge.vv v8, v20, v4 # CHECK-INST: vmfle.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x66] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 66 vmfeq.vv v0, v4, v20, v0.t # CHECK-INST: vmfeq.vv v0, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x10,0x4a,0x60] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 10 4a 60 diff --git a/llvm/test/MC/RISCV/rvv/fdiv.s b/llvm/test/MC/RISCV/rvv/fdiv.s index 28c067ec4fcddd..6d513cc6185381 100644 --- a/llvm/test/MC/RISCV/rvv/fdiv.s +++ b/llvm/test/MC/RISCV/rvv/fdiv.s @@ -1,45 +1,48 @@ # RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST # RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \ # RUN: | FileCheck %s --check-prefix=CHECK-ERROR # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ -# RUN: | llvm-objdump -d --mattr=+experimental-v - \ +# RUN: --mattr=+f \ +# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \ # RUN: | FileCheck %s --check-prefix=CHECK-INST # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN vfdiv.vv v8, v4, v20, v0.t # CHECK-INST: vfdiv.vv v8, v4, v20, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0x80] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 80 vfdiv.vv v8, v4, v20 # CHECK-INST: vfdiv.vv v8, v4, v20 # CHECK-ENCODING: [0x57,0x14,0x4a,0x82] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a 82 vfdiv.vf v8, v4, fa0, v0.t # CHECK-INST: vfdiv.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x80] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 80 vfdiv.vf v8, v4, fa0 # CHECK-INST: vfdiv.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x82] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 82 vfrdiv.vf v8, v4, fa0, v0.t # CHECK-INST: vfrdiv.vf v8, v4, fa0, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0x84] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 84 vfrdiv.vf v8, v4, fa0 # CHECK-INST: vfrdiv.vf v8, v4, fa0 # CHECK-ENCODING: [0x57,0x54,0x45,0x86] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 86 diff --git a/llvm/test/MC/RISCV/rvv/fmacc.s b/llvm/test/MC/RISCV/rvv/fmacc.s index 4dc39e4d57b30b..b3902686d0852a 100644 --- a/llvm/test/MC/RISCV/rvv/fmacc.s +++ 
b/llvm/test/MC/RISCV/rvv/fmacc.s @@ -1,297 +1,300 @@ # RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST # RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \ # RUN: | FileCheck %s --check-prefix=CHECK-ERROR # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ -# RUN: | llvm-objdump -d --mattr=+experimental-v - \ +# RUN: --mattr=+f \ +# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \ # RUN: | FileCheck %s --check-prefix=CHECK-INST # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \ +# RUN: --mattr=+f \ # RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN vfmacc.vv v8, v20, v4, v0.t # CHECK-INST: vfmacc.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xb0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a b0 vfmacc.vv v8, v20, v4 # CHECK-INST: vfmacc.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xb2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a b2 vfmacc.vf v8, fa0, v4, v0.t # CHECK-INST: vfmacc.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xb0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 b0 vfmacc.vf v8, fa0, v4 # CHECK-INST: vfmacc.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xb2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 b2 vfnmacc.vv v8, v20, v4, v0.t # CHECK-INST: vfnmacc.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xb4] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a b4 vfnmacc.vv v8, v20, v4 # CHECK-INST: vfnmacc.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xb6] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a b6 vfnmacc.vf v8, fa0, v4, v0.t # CHECK-INST: vfnmacc.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xb4] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 b4 vfnmacc.vf v8, fa0, v4 # CHECK-INST: vfnmacc.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xb6] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 b6 vfmsac.vv v8, v20, v4, v0.t # CHECK-INST: vfmsac.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xb8] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a b8 vfmsac.vv v8, v20, v4 # CHECK-INST: vfmsac.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xba] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a ba vfmsac.vf v8, fa0, v4, v0.t # 
CHECK-INST: vfmsac.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xb8] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 b8 vfmsac.vf v8, fa0, v4 # CHECK-INST: vfmsac.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xba] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 ba vfnmsac.vv v8, v20, v4, v0.t # CHECK-INST: vfnmsac.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xbc] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a bc vfnmsac.vv v8, v20, v4 # CHECK-INST: vfnmsac.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xbe] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a be vfnmsac.vf v8, fa0, v4, v0.t # CHECK-INST: vfnmsac.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xbc] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 bc vfnmsac.vf v8, fa0, v4 # CHECK-INST: vfnmsac.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xbe] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 be vfmadd.vv v8, v20, v4, v0.t # CHECK-INST: vfmadd.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xa0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a a0 vfmadd.vv v8, v20, v4 # CHECK-INST: vfmadd.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xa2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a a2 vfmadd.vf v8, fa0, v4, v0.t # CHECK-INST: vfmadd.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xa0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 a0 vfmadd.vf v8, fa0, v4 # CHECK-INST: vfmadd.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xa2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 a2 vfnmadd.vv v8, v20, v4, v0.t # CHECK-INST: vfnmadd.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xa4] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a a4 vfnmadd.vv v8, v20, v4 # CHECK-INST: vfnmadd.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xa6] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a a6 vfnmadd.vf v8, fa0, v4, v0.t # CHECK-INST: vfnmadd.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xa4] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # 
CHECK-UNKNOWN: 57 54 45 a4 vfnmadd.vf v8, fa0, v4 # CHECK-INST: vfnmadd.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xa6] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 a6 vfmsub.vv v8, v20, v4, v0.t # CHECK-INST: vfmsub.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xa8] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a a8 vfmsub.vv v8, v20, v4 # CHECK-INST: vfmsub.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xaa] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a aa vfmsub.vf v8, fa0, v4, v0.t # CHECK-INST: vfmsub.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xa8] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 a8 vfmsub.vf v8, fa0, v4 # CHECK-INST: vfmsub.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xaa] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 aa vfnmsub.vv v8, v20, v4, v0.t # CHECK-INST: vfnmsub.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xac] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a ac vfnmsub.vv v8, v20, v4 # CHECK-INST: vfnmsub.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xae] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a ae vfnmsub.vf v8, fa0, v4, v0.t # CHECK-INST: vfnmsub.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xac] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 ac vfnmsub.vf v8, fa0, v4 # CHECK-INST: vfnmsub.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xae] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 ae vfwmacc.vv v8, v20, v4, v0.t # CHECK-INST: vfwmacc.vv v8, v20, v4, v0.t # CHECK-ENCODING: [0x57,0x14,0x4a,0xf0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a f0 vfwmacc.vv v8, v20, v4 # CHECK-INST: vfwmacc.vv v8, v20, v4 # CHECK-ENCODING: [0x57,0x14,0x4a,0xf2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 14 4a f2 vfwmacc.vf v8, fa0, v4, v0.t # CHECK-INST: vfwmacc.vf v8, fa0, v4, v0.t # CHECK-ENCODING: [0x57,0x54,0x45,0xf0] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V' # CHECK-UNKNOWN: 57 54 45 f0 vfwmacc.vf v8, fa0, v4 # CHECK-INST: vfwmacc.vf v8, fa0, v4 # CHECK-ENCODING: [0x57,0x54,0x45,0xf2] -# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions) +# CHECK-ERROR: 
instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 f2

vfwnmacc.vv v8, v20, v4, v0.t
# CHECK-INST: vfwnmacc.vv v8, v20, v4, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xf4]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a f4

vfwnmacc.vv v8, v20, v4
# CHECK-INST: vfwnmacc.vv v8, v20, v4
# CHECK-ENCODING: [0x57,0x14,0x4a,0xf6]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a f6

vfwnmacc.vf v8, fa0, v4, v0.t
# CHECK-INST: vfwnmacc.vf v8, fa0, v4, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0xf4]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 f4

vfwnmacc.vf v8, fa0, v4
# CHECK-INST: vfwnmacc.vf v8, fa0, v4
# CHECK-ENCODING: [0x57,0x54,0x45,0xf6]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 f6

vfwmsac.vv v8, v20, v4, v0.t
# CHECK-INST: vfwmsac.vv v8, v20, v4, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xf8]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a f8

vfwmsac.vv v8, v20, v4
# CHECK-INST: vfwmsac.vv v8, v20, v4
# CHECK-ENCODING: [0x57,0x14,0x4a,0xfa]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a fa

vfwmsac.vf v8, fa0, v4, v0.t
# CHECK-INST: vfwmsac.vf v8, fa0, v4, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0xf8]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 f8

vfwmsac.vf v8, fa0, v4
# CHECK-INST: vfwmsac.vf v8, fa0, v4
# CHECK-ENCODING: [0x57,0x54,0x45,0xfa]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 fa

vfwnmsac.vv v8, v20, v4, v0.t
# CHECK-INST: vfwnmsac.vv v8, v20, v4, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xfc]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a fc

vfwnmsac.vv v8, v20, v4
# CHECK-INST: vfwnmsac.vv v8, v20, v4
# CHECK-ENCODING: [0x57,0x14,0x4a,0xfe]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a fe

vfwnmsac.vf v8, fa0, v4, v0.t
# CHECK-INST: vfwnmsac.vf v8, fa0, v4, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 fc

vfwnmsac.vf v8, fa0, v4
# CHECK-INST: vfwnmsac.vf v8, fa0, v4
# CHECK-ENCODING: [0x57,0x54,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 fe

diff --git a/llvm/test/MC/RISCV/rvv/fminmax.s b/llvm/test/MC/RISCV/rvv/fminmax.s
index 8ce3db16b3eaf4..2f84c4fcb054c5 100644
--- a/llvm/test/MC/RISCV/rvv/fminmax.s
+++ b/llvm/test/MC/RISCV/rvv/fminmax.s
@@ -1,57 +1,60 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
+# RUN: --mattr=+f \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vfmin.vv v8, v4, v20, v0.t
# CHECK-INST: vfmin.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x10]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 10

vfmin.vv v8, v4, v20
# CHECK-INST: vfmin.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x12]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 12

vfmin.vf v8, v4, fa0, v0.t
# CHECK-INST: vfmin.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x10]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 10

vfmin.vf v8, v4, fa0
# CHECK-INST: vfmin.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x12]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 12

vfmax.vv v8, v4, v20, v0.t
# CHECK-INST: vfmax.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x18]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 18

vfmax.vv v8, v4, v20
# CHECK-INST: vfmax.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x1a]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 1a

vfmax.vf v8, v4, fa0, v0.t
# CHECK-INST: vfmax.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x18]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 18

vfmax.vf v8, v4, fa0
# CHECK-INST: vfmax.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x1a]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 1a

diff --git a/llvm/test/MC/RISCV/rvv/fmul.s b/llvm/test/MC/RISCV/rvv/fmul.s
index 82f3bd3a4bf661..dbdf5c300d632a 100644
--- a/llvm/test/MC/RISCV/rvv/fmul.s
+++ b/llvm/test/MC/RISCV/rvv/fmul.s
@@ -1,57 +1,60 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
+# RUN: --mattr=+f \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vfmul.vv v8, v4, v20, v0.t
# CHECK-INST: vfmul.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x90]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 90

vfmul.vv v8, v4, v20
# CHECK-INST: vfmul.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x92]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 92

vfmul.vf v8, v4, fa0, v0.t
# CHECK-INST: vfmul.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x90]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 90

vfmul.vf v8, v4, fa0
# CHECK-INST: vfmul.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x92]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 92

vfwmul.vv v8, v4, v20, v0.t
# CHECK-INST: vfwmul.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xe0]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a e0

vfwmul.vv v8, v4, v20
# CHECK-INST: vfwmul.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0xe2]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a e2

vfwmul.vf v8, v4, fa0, v0.t
# CHECK-INST: vfwmul.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0xe0]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 e0

vfwmul.vf v8, v4, fa0
# CHECK-INST: vfwmul.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0xe2]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 e2

diff --git a/llvm/test/MC/RISCV/rvv/fmv.s b/llvm/test/MC/RISCV/rvv/fmv.s
index 74e3f0268bda2c..5a855ed19bd5fd 100644
--- a/llvm/test/MC/RISCV/rvv/fmv.s
+++ b/llvm/test/MC/RISCV/rvv/fmv.s
@@ -1,27 +1,30 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
+# RUN: --mattr=+f \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vfmv.v.f v8, fa0
# CHECK-INST: vfmv.v.f v8, fa0
# CHECK-ENCODING: [0x57,0x54,0x05,0x5e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 05 5e

vfmv.f.s fa0, v4
# CHECK-INST: vfmv.f.s fa0, v4
# CHECK-ENCODING: [0x57,0x15,0x40,0x42]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 15 40 42

vfmv.s.f v8, fa0
# CHECK-INST: vfmv.s.f v8, fa0
# CHECK-ENCODING: [0x57,0x54,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 05 42

diff --git a/llvm/test/MC/RISCV/rvv/fothers.s b/llvm/test/MC/RISCV/rvv/fothers.s
index b9e03842fa2dea..437d086b3f7047 100644
--- a/llvm/test/MC/RISCV/rvv/fothers.s
+++ b/llvm/test/MC/RISCV/rvv/fothers.s
@@ -1,63 +1,66 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
+# RUN: --mattr=+f \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vfsqrt.v v8, v4, v0.t
# CHECK-INST: vfsqrt.v v8, v4, v0.t
# CHECK-ENCODING: [0x57,0x14,0x40,0x4c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 40 4c

vfsqrt.v v8, v4
# CHECK-INST: vfsqrt.v v8, v4
# CHECK-ENCODING: [0x57,0x14,0x40,0x4e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 40 4e

vfclass.v v8, v4, v0.t
# CHECK-INST: vfclass.v v8, v4, v0.t
# CHECK-ENCODING: [0x57,0x14,0x48,0x4c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 48 4c

vfclass.v v8, v4
# CHECK-INST: vfclass.v v8, v4
# CHECK-ENCODING: [0x57,0x14,0x48,0x4e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 48 4e

vfmerge.vfm v8, v4, fa0, v0
# CHECK-INST: vfmerge.vfm v8, v4, fa0, v0
# CHECK-ENCODING: [0x57,0x54,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 5c

vfslide1up.vf v8, v4, fa0, v0.t
# CHECK-INST: vfslide1up.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x38]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 38

vfslide1up.vf v8, v4, fa0
# CHECK-INST: vfslide1up.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x3a]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 3a

vfslide1down.vf v8, v4, fa0, v0.t
# CHECK-INST: vfslide1down.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 3c

vfslide1down.vf v8, v4, fa0
# CHECK-INST: vfslide1down.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 3e

diff --git a/llvm/test/MC/RISCV/rvv/freduction.s b/llvm/test/MC/RISCV/rvv/freduction.s
index a85e676f9a8cae..4ecc9095f62391 100644
--- a/llvm/test/MC/RISCV/rvv/freduction.s
+++ b/llvm/test/MC/RISCV/rvv/freduction.s
@@ -1,87 +1,90 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
+# RUN: --mattr=+f \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vfredosum.vs v8, v4, v20, v0.t
# CHECK-INST: vfredosum.vs v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x0c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 0c

vfredosum.vs v8, v4, v20
# CHECK-INST: vfredosum.vs v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x0e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 0e

vfredsum.vs v8, v4, v20, v0.t
# CHECK-INST: vfredsum.vs v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x04]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 04

vfredsum.vs v8, v4, v20
# CHECK-INST: vfredsum.vs v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x06]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 06

vfredmax.vs v8, v4, v20, v0.t
# CHECK-INST: vfredmax.vs v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x1c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 1c

vfredmax.vs v8, v4, v20
# CHECK-INST: vfredmax.vs v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x1e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 1e

vfredmin.vs v8, v4, v20, v0.t
# CHECK-INST: vfredmin.vs v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x14]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 14

vfredmin.vs v8, v4, v20
# CHECK-INST: vfredmin.vs v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x16]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 16

vfwredosum.vs v8, v4, v20, v0.t
# CHECK-INST: vfwredosum.vs v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xcc]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a cc

vfwredosum.vs v8, v4, v20
# CHECK-INST: vfwredosum.vs v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0xce]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a ce

vfwredsum.vs v8, v4, v20, v0.t
# CHECK-INST: vfwredsum.vs v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xc4]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a c4

vfwredsum.vs v8, v4, v20
# CHECK-INST: vfwredsum.vs v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0xc6]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a c6

vfredosum.vs v0, v4, v20, v0.t
# CHECK-INST: vfredosum.vs v0, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x10,0x4a,0x0c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 10 4a 0c

diff --git a/llvm/test/MC/RISCV/rvv/fsub.s b/llvm/test/MC/RISCV/rvv/fsub.s
index 753da9a202b15f..951e485d903b08 100644
--- a/llvm/test/MC/RISCV/rvv/fsub.s
+++ b/llvm/test/MC/RISCV/rvv/fsub.s
@@ -1,93 +1,96 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
+# RUN: --mattr=+f \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vfsub.vv v8, v4, v20, v0.t
# CHECK-INST: vfsub.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x08]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 08

vfsub.vv v8, v4, v20
# CHECK-INST: vfsub.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x0a]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 0a

vfsub.vf v8, v4, fa0, v0.t
# CHECK-INST: vfsub.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x08]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 08

vfsub.vf v8, v4, fa0
# CHECK-INST: vfsub.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x0a]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 0a

vfrsub.vf v8, v4, fa0, v0.t
# CHECK-INST: vfrsub.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 9c

vfrsub.vf v8, v4, fa0
# CHECK-INST: vfrsub.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 9e

vfwsub.vv v8, v4, v20, v0.t
# CHECK-INST: vfwsub.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xc8]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a c8

vfwsub.vv v8, v4, v20
# CHECK-INST: vfwsub.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0xca]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a ca

vfwsub.vf v8, v4, fa0, v0.t
# CHECK-INST: vfwsub.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0xc8]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 c8

vfwsub.vf v8, v4, fa0
# CHECK-INST: vfwsub.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0xca]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 ca

vfwsub.wv v8, v4, v20, v0.t
# CHECK-INST: vfwsub.wv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0xd8]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a d8

vfwsub.wv v8, v4, v20
# CHECK-INST: vfwsub.wv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0xda]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a da

vfwsub.wf v8, v4, fa0, v0.t
# CHECK-INST: vfwsub.wf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0xd8]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 d8

vfwsub.wf v8, v4, fa0
# CHECK-INST: vfwsub.wf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0xda]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 da

diff --git a/llvm/test/MC/RISCV/rvv/sign-injection.s b/llvm/test/MC/RISCV/rvv/sign-injection.s
index 0831f923be3fe8..a9ec0ad703896a 100644
--- a/llvm/test/MC/RISCV/rvv/sign-injection.s
+++ b/llvm/test/MC/RISCV/rvv/sign-injection.s
@@ -1,81 +1,84 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
+# RUN: --mattr=+f \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
+# RUN: --mattr=+f \
# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN

vfsgnj.vv v8, v4, v20, v0.t
# CHECK-INST: vfsgnj.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x20]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 20

vfsgnj.vv v8, v4, v20
# CHECK-INST: vfsgnj.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x22]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 22

vfsgnj.vf v8, v4, fa0, v0.t
# CHECK-INST: vfsgnj.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x20]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 20

vfsgnj.vf v8, v4, fa0
# CHECK-INST: vfsgnj.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x22]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 22

vfsgnjn.vv v8, v4, v20, v0.t
# CHECK-INST: vfsgnjn.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x24]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 24

vfsgnjn.vv v8, v4, v20
# CHECK-INST: vfsgnjn.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x26]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 26

vfsgnjn.vf v8, v4, fa0, v0.t
# CHECK-INST: vfsgnjn.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x24]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 24

vfsgnjn.vf v8, v4, fa0
# CHECK-INST: vfsgnjn.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x26]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 26

vfsgnjx.vv v8, v4, v20, v0.t
# CHECK-INST: vfsgnjx.vv v8, v4, v20, v0.t
# CHECK-ENCODING: [0x57,0x14,0x4a,0x28]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 28

vfsgnjx.vv v8, v4, v20
# CHECK-INST: vfsgnjx.vv v8, v4, v20
# CHECK-ENCODING: [0x57,0x14,0x4a,0x2a]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 14 4a 2a

vfsgnjx.vf v8, v4, fa0, v0.t
# CHECK-INST: vfsgnjx.vf v8, v4, fa0, v0.t
# CHECK-ENCODING: [0x57,0x54,0x45,0x28]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 28

vfsgnjx.vf v8, v4, fa0
# CHECK-INST: vfsgnjx.vf v8, v4, fa0
# CHECK-ENCODING: [0x57,0x54,0x45,0x2a]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
# CHECK-UNKNOWN: 57 54 45 2a

diff --git a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
index 961cb0b0bc468e..a1e117bb716ebf 100644
--- a/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
@@ -99,7 +99,6 @@ static_library("CodeGen") {
"MachineBlockPlacement.cpp",
"MachineBranchProbabilityInfo.cpp",
"MachineCSE.cpp",
- "MachineCheckDebugify.cpp",
"MachineCombiner.cpp",
"MachineCopyPropagation.cpp",
"MachineDebugify.cpp",
diff --git a/mlir/include/mlir/Dialect/ArmSVE/ArmSVEDialect.h b/mlir/include/mlir/Dialect/ArmSVE/ArmSVEDialect.h
index 15ca3403bef06a..7689eb4b137884 100644
--- a/mlir/include/mlir/Dialect/ArmSVE/ArmSVEDialect.h
+++ b/mlir/include/mlir/Dialect/ArmSVE/ArmSVEDialect.h
@@ -13,9 +13,9 @@
#ifndef MLIR_DIALECT_ARMSVE_ARMSVEDIALECT_H
#define MLIR_DIALECT_ARMSVE_ARMSVEDIALECT_H

+#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpDefinition.h"
-#include "mlir/IR/StandardTypes.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"

#include "mlir/Dialect/ArmSVE/ArmSVEDialect.h.inc"
diff --git a/mlir/include/mlir/IR/StandardTypes.h b/mlir/include/mlir/IR/StandardTypes.h
deleted file mode 100644
index bf186ef46019a1..00000000000000
--- a/mlir/include/mlir/IR/StandardTypes.h
+++ /dev/null
@@ -1,16 +0,0 @@
-//===- StandardTypes.h - MLIR Standard Type Classes -------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef MLIR_IR_STANDARDTYPES_H
-#define MLIR_IR_STANDARDTYPES_H
-
-/// TODO: Remove this file when all references have been replaced by
-/// BuiltinTypes.h.
-#include "mlir/IR/BuiltinTypes.h"
-
-#endif // MLIR_IR_STANDARDTYPES_H
diff --git a/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp b/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp
index 2c76a64fac5cdd..c10458a442b239 100644
--- a/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp
+++ b/mlir/lib/Dialect/ArmSVE/IR/ArmSVEDialect.cpp
@@ -15,7 +15,6 @@
#include "mlir/IR/Builders.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/OpImplementation.h"
-#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "llvm/ADT/TypeSwitch.h"