From 37e75cdf9f432940cfbdcab3a3d8d93eba15bca4 Mon Sep 17 00:00:00 2001 From: Alexis Engelke Date: Tue, 6 Aug 2024 10:22:31 +0200 Subject: [PATCH 01/10] [CodeGen] Use BasicBlock numbers to map to MBBs (#101883) Now that basic blocks have numbers, we can replace the BB-to-MBB maps and the visited set during ISel with vectors for faster lookup. --- llvm/include/llvm/CodeGen/FunctionLoweringInfo.h | 13 +++++++------ llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h | 5 ----- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 5 +++-- .../CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 3 ++- llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 5 +++-- 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h index 0e08b9bef11aad..c17cacbdc8759d 100644 --- a/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h +++ b/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h @@ -73,8 +73,8 @@ class FunctionLoweringInfo { /// allocated to hold a pointer to the hidden sret parameter. Register DemoteRegister; - /// MBBMap - A mapping from LLVM basic blocks to their machine code entry. - DenseMap MBBMap; + /// A mapping from LLVM basic block number to their machine block. + SmallVector MBBMap; /// ValueMap - Since we emit code for the function a basic block at a time, /// we must remember which virtual registers hold the values for @@ -172,9 +172,9 @@ class FunctionLoweringInfo { /// for a value. DenseMap PreferredExtendType; - /// VisitedBBs - The set of basic blocks visited thus far by instruction - /// selection. - SmallPtrSet VisitedBBs; + /// The set of basic blocks visited thus far by instruction selection. Indexed + /// by basic block number. + SmallVector VisitedBBs; /// PHINodesToUpdate - A list of phi instructions whose operand list will /// be updated after processing the current basic block. @@ -213,7 +213,8 @@ class FunctionLoweringInfo { } MachineBasicBlock *getMBB(const BasicBlock *BB) const { - return MBBMap.lookup(BB); + assert(BB->getNumber() < MBBMap.size() && "uninitialized MBBMap?"); + return MBBMap[BB->getNumber()]; } Register CreateReg(MVT VT, bool isDivergent = false); diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h index deae2c55d26e27..2796ea4a866173 100644 --- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h +++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h @@ -146,11 +146,6 @@ class IRTranslator : public MachineFunctionPass { /// virtual registers and offsets. ValueToVRegInfo VMap; - // N.b. it's not completely obvious that this will be sufficient for every - // LLVM IR construct (with "invoke" being the obvious candidate to mess up our - // lives. - DenseMap BBToMBB; - // One BasicBlock can be translated to multiple MachineBasicBlocks. 
For such // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains // a mapping between the edges arriving at the BasicBlock to the corresponding diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 68a8a273a1b479..0169a0e466d878 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -287,7 +287,7 @@ Align IRTranslator::getMemOpAlign(const Instruction &I) { } MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) { - MachineBasicBlock *&MBB = BBToMBB[&BB]; + MachineBasicBlock *MBB = FuncInfo.getMBB(&BB); assert(MBB && "BasicBlock was not encountered before"); return *MBB; } @@ -3907,8 +3907,9 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { bool HasMustTailInVarArgFn = false; // Create all blocks, in IR order, to preserve the layout. + FuncInfo.MBBMap.resize(F.getMaxBlockNumber()); for (const BasicBlock &BB: F) { - auto *&MBB = BBToMBB[&BB]; + auto *&MBB = FuncInfo.MBBMap[BB.getNumber()]; MBB = MF->CreateMachineBasicBlock(&BB); MF->push_back(MBB); diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp index 9ca76aa09a2fa6..8405ba9ac326cf 100644 --- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -236,6 +236,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf, // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This // also creates the initial PHI MachineInstrs, though none of the input // operands are populated. + MBBMap.resize(Fn->getMaxBlockNumber()); for (const BasicBlock &BB : *Fn) { // Don't create MachineBasicBlocks for imaginary EH pad blocks. These blocks // are really data, and no instructions can live here. @@ -261,7 +262,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf, } MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB); - MBBMap[&BB] = MBB; + MBBMap[BB.getNumber()] = MBB; MF->push_back(MBB); // Transfer the address-taken flag. This is necessary because there could diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 95b6d27d31eea6..3e517a51862de3 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -1643,11 +1643,12 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { } // Iterate over all basic blocks in the function. + FuncInfo->VisitedBBs.assign(Fn.getMaxBlockNumber(), false); for (const BasicBlock *LLVMBB : RPOT) { if (OptLevel != CodeGenOptLevel::None) { bool AllPredsVisited = true; for (const BasicBlock *Pred : predecessors(LLVMBB)) { - if (!FuncInfo->VisitedBBs.count(Pred)) { + if (!FuncInfo->VisitedBBs[Pred->getNumber()]) { AllPredsVisited = false; break; } @@ -1661,7 +1662,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { FuncInfo->InvalidatePHILiveOutRegInfo(&PN); } - FuncInfo->VisitedBBs.insert(LLVMBB); + FuncInfo->VisitedBBs[LLVMBB->getNumber()] = true; } BasicBlock::const_iterator const Begin = From 4c23c1b93d8a1e9f7c0eb848d2fe4680650ad999 Mon Sep 17 00:00:00 2001 From: Alexis Engelke Date: Tue, 6 Aug 2024 10:25:03 +0200 Subject: [PATCH 02/10] [CodeGen] Use SmallVector for MBB preds/succs (#101948) Avoid extra heap allocations for typical predecessor/successor counts. 
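As a rough illustration of the trade-off (hypothetical types and inline capacity, not code from this patch): a SmallVector keeps its first few elements in inline storage and only touches the heap once that capacity is exceeded, whereas std::vector allocates on first use.

  #include "llvm/ADT/SmallVector.h"
  #include <vector>

  struct BlockStub {}; // stand-in for MachineBasicBlock in this sketch

  // Most blocks have only a couple of predecessors/successors, so a small
  // inline buffer covers the common case without a heap allocation. The
  // capacity of 2 here is illustrative, not a value taken from the patch.
  void addSucc(llvm::SmallVector<BlockStub *, 2> &Succs,
               std::vector<BlockStub *> &HeapSuccs, BlockStub *B) {
    Succs.push_back(B);     // fits in the inline buffer: no allocation
    HeapSuccs.push_back(B); // std::vector allocates on the first push_back
  }
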
--- llvm/include/llvm/Analysis/RegionInfoImpl.h | 3 ++- llvm/include/llvm/CodeGen/MachineBasicBlock.h | 22 ++++++++++--------- llvm/lib/CodeGen/MIRSampleProfile.cpp | 6 +++-- .../Target/Hexagon/HexagonCopyHoisting.cpp | 3 ++- 4 files changed, 20 insertions(+), 14 deletions(-) diff --git a/llvm/include/llvm/Analysis/RegionInfoImpl.h b/llvm/include/llvm/Analysis/RegionInfoImpl.h index c5e8821858fd2f..ebfb060ded3dd4 100644 --- a/llvm/include/llvm/Analysis/RegionInfoImpl.h +++ b/llvm/include/llvm/Analysis/RegionInfoImpl.h @@ -814,7 +814,8 @@ RegionInfoBase::getMaxRegionExit(BlockT *BB) const { // Get the single exit of BB. if (R && R->getEntry() == BB) Exit = R->getExit(); - else if (++BlockTraits::child_begin(BB) == BlockTraits::child_end(BB)) + else if (std::next(BlockTraits::child_begin(BB)) == + BlockTraits::child_end(BB)) Exit = *BlockTraits::child_begin(BB); else // No single exit exists. return Exit; diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h index b8153fd5d3fb72..5b80827b780b55 100644 --- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h +++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h @@ -157,8 +157,8 @@ class MachineBasicBlock Instructions Insts; /// Keep track of the predecessor / successor basic blocks. - std::vector Predecessors; - std::vector Successors; + SmallVector Predecessors; + SmallVector Successors; /// Keep track of the probabilities to the successors. This vector has the /// same order as Successors, or it is empty if we don't use it (disable @@ -387,18 +387,20 @@ class MachineBasicBlock } // Machine-CFG iterators - using pred_iterator = std::vector::iterator; - using const_pred_iterator = std::vector::const_iterator; - using succ_iterator = std::vector::iterator; - using const_succ_iterator = std::vector::const_iterator; + using pred_iterator = SmallVectorImpl::iterator; + using const_pred_iterator = + SmallVectorImpl::const_iterator; + using succ_iterator = SmallVectorImpl::iterator; + using const_succ_iterator = + SmallVectorImpl::const_iterator; using pred_reverse_iterator = - std::vector::reverse_iterator; + SmallVectorImpl::reverse_iterator; using const_pred_reverse_iterator = - std::vector::const_reverse_iterator; + SmallVectorImpl::const_reverse_iterator; using succ_reverse_iterator = - std::vector::reverse_iterator; + SmallVectorImpl::reverse_iterator; using const_succ_reverse_iterator = - std::vector::const_reverse_iterator; + SmallVectorImpl::const_reverse_iterator; pred_iterator pred_begin() { return Predecessors.begin(); } const_pred_iterator pred_begin() const { return Predecessors.begin(); } pred_iterator pred_end() { return Predecessors.end(); } diff --git a/llvm/lib/CodeGen/MIRSampleProfile.cpp b/llvm/lib/CodeGen/MIRSampleProfile.cpp index ce82f280c1c53e..90a15210a03b82 100644 --- a/llvm/lib/CodeGen/MIRSampleProfile.cpp +++ b/llvm/lib/CodeGen/MIRSampleProfile.cpp @@ -126,8 +126,10 @@ template <> struct IRTraits { using PostDominatorTreeT = MachinePostDominatorTree; using OptRemarkEmitterT = MachineOptimizationRemarkEmitter; using OptRemarkAnalysisT = MachineOptimizationRemarkAnalysis; - using PredRangeT = iterator_range::iterator>; - using SuccRangeT = iterator_range::iterator>; + using PredRangeT = + iterator_range::iterator>; + using SuccRangeT = + iterator_range::iterator>; static Function &getFunction(MachineFunction &F) { return F.getFunction(); } static const MachineBasicBlock *getEntryBB(const MachineFunction *F) { return GraphTraits::getEntryNode(F); diff --git 
a/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp b/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp index e9d95c6e89db43..a2230289ae69c7 100644 --- a/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp +++ b/llvm/lib/Target/Hexagon/HexagonCopyHoisting.cpp @@ -249,7 +249,8 @@ void HexagonCopyHoisting::moveCopyInstr(MachineBasicBlock *DestBB, DestBB->splice(FirstTI, MI->getParent(), MI); addMItoCopyList(MI); - for (auto I = ++(DestBB->succ_begin()), E = DestBB->succ_end(); I != E; ++I) { + for (auto I = std::next(DestBB->succ_begin()), E = DestBB->succ_end(); I != E; + ++I) { MachineBasicBlock *SuccBB = *I; auto &BBCopyInst = CopyMIList[SuccBB->getNumber()]; MachineInstr *SuccMI = BBCopyInst[Key]; From f57a3a0d9d4817d2ca7c3152dda331a796bebe13 Mon Sep 17 00:00:00 2001 From: Benjamin Maxwell Date: Tue, 6 Aug 2024 09:29:09 +0100 Subject: [PATCH 03/10] [mlir][docs] Fix return type in Type/Attr printer docs (#101958) These return `void`, not `Type` or `Attribute` respectively. --- mlir/docs/DefiningDialects/AttributesAndTypes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mlir/docs/DefiningDialects/AttributesAndTypes.md b/mlir/docs/DefiningDialects/AttributesAndTypes.md index d6941c0b681f8a..1430edd2ffb025 100644 --- a/mlir/docs/DefiningDialects/AttributesAndTypes.md +++ b/mlir/docs/DefiningDialects/AttributesAndTypes.md @@ -551,13 +551,13 @@ For Types, these methods will have the form: - `static Type MyType::parse(AsmParser &parser)` -- `Type MyType::print(AsmPrinter &p) const` +- `void MyType::print(AsmPrinter &p) const` For Attributes, these methods will have the form: - `static Attribute MyAttr::parse(AsmParser &parser, Type attrType)` -- `Attribute MyAttr::print(AsmPrinter &p) const` +- `void MyAttr::print(AsmPrinter &p) const` #### Using `assemblyFormat` From 1b8593545316971ac3f922dcb7178623b5820003 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Tue, 6 Aug 2024 09:37:57 +0100 Subject: [PATCH 04/10] Fix MSVC "not all control paths return a value" warning. NFC. 
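For context, a minimal sketch of the pattern being applied (illustrative enum and function, not from this change): MSVC does not treat a switch that covers every enumerator as exhaustive, so a trailing llvm_unreachable both documents the invariant and silences the warning without adding a reachable return.

  #include "llvm/Support/ErrorHandling.h"

  enum class BoolKind { Zero, One }; // hypothetical enum for illustration

  // Every enumerator returns, yet MSVC still warns "not all control paths
  // return a value"; the llvm_unreachable after the switch fixes that.
  static int toIndex(BoolKind K) {
    switch (K) {
    case BoolKind::Zero:
      return 0;
    case BoolKind::One:
      return 1;
    }
    llvm_unreachable("fully covered switch over BoolKind");
  }
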
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 534a4e60bb9f0b..5c6a2454d664ba 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -13157,6 +13157,7 @@ std::optional SelectionDAG::isBoolConstant(SDValue N, case TargetLowering::UndefinedBooleanContent: return CVal[0]; } + llvm_unreachable("Unknown BooleanContent enum"); } void SelectionDAG::createOperands(SDNode *Node, ArrayRef Vals) { From bb59f04e7e75dcbe39f1bf952304a157f0035314 Mon Sep 17 00:00:00 2001 From: Sam James Date: Tue, 6 Aug 2024 09:58:36 +0100 Subject: [PATCH 05/10] [LLDB] Add `` to AddressableBits (#102110) --- lldb/include/lldb/Utility/AddressableBits.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lldb/include/lldb/Utility/AddressableBits.h b/lldb/include/lldb/Utility/AddressableBits.h index 0d27c3561ec272..8c7a1ec5f52c0a 100644 --- a/lldb/include/lldb/Utility/AddressableBits.h +++ b/lldb/include/lldb/Utility/AddressableBits.h @@ -12,6 +12,8 @@ #include "lldb/lldb-forward.h" #include "lldb/lldb-public.h" +#include + namespace lldb_private { /// \class AddressableBits AddressableBits.h "lldb/Core/AddressableBits.h" From b1234ddbe2652aa7948242a57107ca7ab12fd2f8 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Tue, 6 Aug 2024 10:18:06 +0100 Subject: [PATCH 06/10] [DAG] Add legalization handling for ABDS/ABDU (#92576) Always match ABD patterns pre-legalization, and use TargetLowering::expandABD to expand again during legalization. abdu(lhs, rhs) -> sub(xor(sub(lhs, rhs), usub_overflow(lhs, rhs)), usub_overflow(lhs, rhs)) Alive2: https://alive2.llvm.org/ce/z/dVdMyv --- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 12 +- .../SelectionDAG/LegalizeIntegerTypes.cpp | 11 + llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 1 + .../SelectionDAG/LegalizeVectorTypes.cpp | 6 + .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2 + .../CodeGen/SelectionDAG/TargetLowering.cpp | 32 + llvm/test/CodeGen/AArch64/abds-neg.ll | 156 +- llvm/test/CodeGen/AArch64/abds.ll | 200 +-- llvm/test/CodeGen/AArch64/abdu-neg.ll | 111 +- llvm/test/CodeGen/AArch64/abdu.ll | 165 +- llvm/test/CodeGen/AArch64/arm64-vabs.ll | 30 +- llvm/test/CodeGen/AArch64/neon-abd.ll | 68 +- llvm/test/CodeGen/AArch64/sve-aba.ll | 26 +- llvm/test/CodeGen/AArch64/sve-abd.ll | 26 +- llvm/test/CodeGen/AMDGPU/sad.ll | 6 +- llvm/test/CodeGen/ARM/neon_vabd.ll | 163 +- llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll | 426 ++--- llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll | 24 +- llvm/test/CodeGen/RISCV/abds-neg.ll | 1123 ++++++------- llvm/test/CodeGen/RISCV/abds.ll | 1454 +++++++---------- llvm/test/CodeGen/RISCV/abdu-neg.ll | 1021 ++++++------ llvm/test/CodeGen/RISCV/abdu.ll | 1446 ++++++++-------- llvm/test/CodeGen/RISCV/rvv/abd.ll | 20 +- llvm/test/CodeGen/Thumb2/mve-vabdus.ll | 3 +- llvm/test/CodeGen/X86/abds-neg.ll | 208 +-- llvm/test/CodeGen/X86/abds.ll | 292 ++-- llvm/test/CodeGen/X86/abdu-neg.ll | 92 +- llvm/test/CodeGen/X86/abdu.ll | 152 +- 28 files changed, 3198 insertions(+), 4078 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index da2ad1328ebf02..9b61d83612fde6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -4089,13 +4089,13 @@ SDValue DAGCombiner::visitSUB(SDNode *N) { } // smax(a,b) - smin(a,b) --> abds(a,b) - if 
(hasOperation(ISD::ABDS, VT) && + if ((!LegalOperations || hasOperation(ISD::ABDS, VT)) && sd_match(N0, m_SMax(m_Value(A), m_Value(B))) && sd_match(N1, m_SMin(m_Specific(A), m_Specific(B)))) return DAG.getNode(ISD::ABDS, DL, VT, A, B); // umax(a,b) - umin(a,b) --> abdu(a,b) - if (hasOperation(ISD::ABDU, VT) && + if ((!LegalOperations || hasOperation(ISD::ABDU, VT)) && sd_match(N0, m_UMax(m_Value(A), m_Value(B))) && sd_match(N1, m_UMin(m_Specific(A), m_Specific(B)))) return DAG.getNode(ISD::ABDU, DL, VT, A, B); @@ -10922,6 +10922,7 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) { (Opc0 != ISD::ZERO_EXTEND && Opc0 != ISD::SIGN_EXTEND && Opc0 != ISD::SIGN_EXTEND_INREG)) { // fold (abs (sub nsw x, y)) -> abds(x, y) + // Don't fold this for unsupported types as we lose the NSW handling. if (AbsOp1->getFlags().hasNoSignedWrap() && hasOperation(ISD::ABDS, VT) && TLI.preferABDSToABSWithNSW(VT)) { SDValue ABD = DAG.getNode(ISD::ABDS, DL, VT, Op0, Op1); @@ -10944,7 +10945,8 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) { // fold abs(zext(x) - zext(y)) -> zext(abdu(x, y)) EVT MaxVT = VT0.bitsGT(VT1) ? VT0 : VT1; if ((VT0 == MaxVT || Op0->hasOneUse()) && - (VT1 == MaxVT || Op1->hasOneUse()) && hasOperation(ABDOpcode, MaxVT)) { + (VT1 == MaxVT || Op1->hasOneUse()) && + (!LegalOperations || hasOperation(ABDOpcode, MaxVT))) { SDValue ABD = DAG.getNode(ABDOpcode, DL, MaxVT, DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op0), DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op1)); @@ -10954,7 +10956,7 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) { // fold abs(sext(x) - sext(y)) -> abds(sext(x), sext(y)) // fold abs(zext(x) - zext(y)) -> abdu(zext(x), zext(y)) - if (hasOperation(ABDOpcode, VT)) { + if (!LegalOperations || hasOperation(ABDOpcode, VT)) { SDValue ABD = DAG.getNode(ABDOpcode, DL, VT, Op0, Op1); return DAG.getZExtOrTrunc(ABD, DL, SrcVT); } @@ -12376,7 +12378,7 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) { N1.getOperand(1) == N2.getOperand(0)) { bool IsSigned = isSignedIntSetCC(CC); unsigned ABDOpc = IsSigned ? 
ISD::ABDS : ISD::ABDU; - if (hasOperation(ABDOpc, VT)) { + if (!LegalOperations || hasOperation(ABDOpc, VT)) { switch (CC) { case ISD::SETGT: case ISD::SETGE: diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index a342d04ce4c47e..fefb0844f1ab53 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -192,8 +192,10 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) { case ISD::VP_SUB: case ISD::VP_MUL: Res = PromoteIntRes_SimpleIntBinOp(N); break; + case ISD::ABDS: case ISD::AVGCEILS: case ISD::AVGFLOORS: + case ISD::VP_SMIN: case ISD::VP_SMAX: case ISD::SDIV: @@ -201,8 +203,10 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) { case ISD::VP_SDIV: case ISD::VP_SREM: Res = PromoteIntRes_SExtIntBinOp(N); break; + case ISD::ABDU: case ISD::AVGCEILU: case ISD::AVGFLOORU: + case ISD::VP_UMIN: case ISD::VP_UMAX: case ISD::UDIV: @@ -2791,6 +2795,8 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) { case ISD::PARITY: ExpandIntRes_PARITY(N, Lo, Hi); break; case ISD::Constant: ExpandIntRes_Constant(N, Lo, Hi); break; case ISD::ABS: ExpandIntRes_ABS(N, Lo, Hi); break; + case ISD::ABDS: + case ISD::ABDU: ExpandIntRes_ABD(N, Lo, Hi); break; case ISD::CTLZ_ZERO_UNDEF: case ISD::CTLZ: ExpandIntRes_CTLZ(N, Lo, Hi); break; case ISD::CTPOP: ExpandIntRes_CTPOP(N, Lo, Hi); break; @@ -3850,6 +3856,11 @@ void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N, Hi = DAG.getConstant(0, dl, NVT); } +void DAGTypeLegalizer::ExpandIntRes_ABD(SDNode *N, SDValue &Lo, SDValue &Hi) { + SDValue Result = TLI.expandABD(N, DAG); + SplitInteger(Result, Lo, Hi); +} + void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N, SDValue &Lo, SDValue &Hi) { SDLoc dl(N); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h index d4e61c85889012..3a49a8ff10860a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -448,6 +448,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { void ExpandIntRes_AssertZext (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandIntRes_Constant (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandIntRes_ABS (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandIntRes_ABD (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandIntRes_CTLZ (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandIntRes_CTPOP (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandIntRes_CTTZ (SDNode *N, SDValue &Lo, SDValue &Hi); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 5672b611234b87..cac8027f8760fa 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -147,6 +147,8 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) { case ISD::FMINIMUM: case ISD::FMAXIMUM: case ISD::FLDEXP: + case ISD::ABDS: + case ISD::ABDU: case ISD::SMIN: case ISD::SMAX: case ISD::UMIN: @@ -1233,6 +1235,8 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) { case ISD::MUL: case ISD::VP_MUL: case ISD::MULHS: case ISD::MULHU: + case ISD::ABDS: + case ISD::ABDU: case ISD::AVGCEILS: case ISD::AVGCEILU: case ISD::AVGFLOORS: @@ -4368,6 +4372,8 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) { case ISD::MUL: case ISD::VP_MUL: case 
ISD::MULHS: case ISD::MULHU: + case ISD::ABDS: + case ISD::ABDU: case ISD::OR: case ISD::VP_OR: case ISD::SUB: case ISD::VP_SUB: case ISD::XOR: case ISD::VP_XOR: diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 5c6a2454d664ba..c3a7df5361cd45 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -7024,6 +7024,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, assert(VT.isInteger() && "This operator does not apply to FP types!"); assert(N1.getValueType() == N2.getValueType() && N1.getValueType() == VT && "Binary operator types must match!"); + if (VT.isVector() && VT.getVectorElementType() == MVT::i1) + return getNode(ISD::XOR, DL, VT, N1, N2); break; case ISD::SMIN: case ISD::UMAX: diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 7fa83a5999dfee..c90afddb1c8172 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -9311,6 +9311,21 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const { DAG.getNode(ISD::USUBSAT, dl, VT, LHS, RHS), DAG.getNode(ISD::USUBSAT, dl, VT, RHS, LHS)); + // If the subtract doesn't overflow then just use abs(sub()) + // NOTE: don't use frozen operands for value tracking. + bool IsNonNegative = DAG.SignBitIsZero(N->getOperand(1)) && + DAG.SignBitIsZero(N->getOperand(0)); + + if (DAG.willNotOverflowSub(IsSigned || IsNonNegative, N->getOperand(0), + N->getOperand(1))) + return DAG.getNode(ISD::ABS, dl, VT, + DAG.getNode(ISD::SUB, dl, VT, LHS, RHS)); + + if (DAG.willNotOverflowSub(IsSigned || IsNonNegative, N->getOperand(1), + N->getOperand(0))) + return DAG.getNode(ISD::ABS, dl, VT, + DAG.getNode(ISD::SUB, dl, VT, RHS, LHS)); + EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT; SDValue Cmp = DAG.getSetCC(dl, CCVT, LHS, RHS, CC); @@ -9324,6 +9339,23 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const { return DAG.getNode(ISD::SUB, dl, VT, Cmp, Xor); } + // Similar to the branchless expansion, use the (sign-extended) usubo overflow + // flag if the (scalar) type is illegal as this is more likely to legalize + // cleanly: + // abdu(lhs, rhs) -> sub(xor(sub(lhs, rhs), uof(lhs, rhs)), uof(lhs, rhs)) + if (!IsSigned && VT.isScalarInteger() && !isTypeLegal(VT)) { + SDValue USubO = + DAG.getNode(ISD::USUBO, dl, DAG.getVTList(VT, MVT::i1), {LHS, RHS}); + SDValue Cmp = DAG.getNode(ISD::SIGN_EXTEND, dl, VT, USubO.getValue(1)); + SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, USubO.getValue(0), Cmp); + return DAG.getNode(ISD::SUB, dl, VT, Xor, Cmp); + } + + // FIXME: Should really try to split the vector in case it's legal on a + // subvector. 
+ if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) + return DAG.UnrollVectorOp(N); + // abds(lhs, rhs) -> select(sgt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs)) // abdu(lhs, rhs) -> select(ugt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs)) return DAG.getSelect(dl, VT, Cmp, DAG.getNode(ISD::SUB, dl, VT, LHS, RHS), diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll index 29e46ba7bc153b..8c9c9f7188d4dc 100644 --- a/llvm/test/CodeGen/AArch64/abds-neg.ll +++ b/llvm/test/CodeGen/AArch64/abds-neg.ll @@ -8,13 +8,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtb x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 @@ -28,13 +25,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtb x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i16 %b to i64 @@ -48,13 +42,10 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtb x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 @@ -68,13 +59,10 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxth x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 @@ -88,11 +76,10 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxth x8, w0 -; CHECK-NEXT: sub x8, x8, w1, sxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: sub w9, w1, w8 +; CHECK-NEXT: subs w8, w8, w1 +; CHECK-NEXT: csel w8, w8, w9, gt ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = sext i16 %a to i64 @@ -107,13 +94,10 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { define i16 
@abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxth x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 @@ -127,11 +111,9 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtw x8, w0 -; CHECK-NEXT: sub x8, x8, w1, sxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w8, w9, w8, gt ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = sext i32 %a to i64 @@ -146,12 +128,10 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtw x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: sxth w8, w1 +; CHECK-NEXT: sub w9, w8, w0 +; CHECK-NEXT: subs w8, w0, w8 +; CHECK-NEXT: csel w8, w8, w9, gt ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = sext i32 %a to i64 @@ -166,11 +146,9 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtw x8, w0 -; CHECK-NEXT: sub x8, x8, w1, sxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w8, w9, w8, gt ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = sext i32 %a to i64 @@ -185,13 +163,10 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x0, #63 -; CHECK-NEXT: asr x9, x1, #63 -; CHECK-NEXT: subs x10, x0, x1 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: sub x0, x8, x9 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x8, x9, x8, gt +; CHECK-NEXT: neg x0, x8 ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -205,13 +180,10 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x0, #63 -; CHECK-NEXT: asr x9, x1, #63 -; CHECK-NEXT: subs x10, x0, x1 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: sub x0, x8, x9 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x8, x9, x8, gt +; CHECK-NEXT: neg x0, x8 ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -225,19 +197,15 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128: ; CHECK: // %bb.0: -; CHECK-NEXT: asr 
x8, x1, #63 -; CHECK-NEXT: asr x9, x3, #63 -; CHECK-NEXT: subs x10, x0, x2 -; CHECK-NEXT: sbcs x11, x1, x3 -; CHECK-NEXT: sbcs xzr, x8, x9 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: eor x10, x11, x8 -; CHECK-NEXT: subs x9, x9, x8 -; CHECK-NEXT: sbc x8, x10, x8 -; CHECK-NEXT: negs x0, x9 -; CHECK-NEXT: ngc x1, x8 +; CHECK-NEXT: subs x8, x0, x2 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbc x11, x3, x1 +; CHECK-NEXT: sbcs xzr, x3, x1 +; CHECK-NEXT: csel x8, x8, x10, lt +; CHECK-NEXT: csel x9, x9, x11, lt +; CHECK-NEXT: negs x0, x8 +; CHECK-NEXT: ngc x1, x9 ; CHECK-NEXT: ret %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -251,19 +219,15 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x1, #63 -; CHECK-NEXT: asr x9, x3, #63 -; CHECK-NEXT: subs x10, x0, x2 -; CHECK-NEXT: sbcs x11, x1, x3 -; CHECK-NEXT: sbcs xzr, x8, x9 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: eor x10, x11, x8 -; CHECK-NEXT: subs x9, x9, x8 -; CHECK-NEXT: sbc x8, x10, x8 -; CHECK-NEXT: negs x0, x9 -; CHECK-NEXT: ngc x1, x8 +; CHECK-NEXT: subs x8, x0, x2 +; CHECK-NEXT: sbc x9, x1, x3 +; CHECK-NEXT: subs x10, x2, x0 +; CHECK-NEXT: sbc x11, x3, x1 +; CHECK-NEXT: sbcs xzr, x3, x1 +; CHECK-NEXT: csel x8, x8, x10, lt +; CHECK-NEXT: csel x9, x9, x11, lt +; CHECK-NEXT: negs x0, x8 +; CHECK-NEXT: ngc x1, x9 ; CHECK-NEXT: ret %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll index 215907c66a6e83..d861ee64b99514 100644 --- a/llvm/test/CodeGen/AArch64/abds.ll +++ b/llvm/test/CodeGen/AArch64/abds.ll @@ -8,13 +8,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtb x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 @@ -27,13 +24,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtb x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i16 %b to i64 @@ -46,13 +40,10 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtb x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 
+; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 @@ -65,13 +56,10 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxth x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 @@ -84,12 +72,10 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxth x8, w0 -; CHECK-NEXT: sub x8, x8, w1, sxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: sub w9, w1, w8 +; CHECK-NEXT: subs w8, w8, w1 +; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i32 %b to i64 @@ -102,13 +88,10 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxth x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 @@ -121,12 +104,9 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtw x8, w0 -; CHECK-NEXT: sub x8, x8, w1, sxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w0, w9, w8, gt ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -139,13 +119,10 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: sxtw x8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, sxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sxth w8, w1 +; CHECK-NEXT: sub w9, w8, w0 +; CHECK-NEXT: subs w8, w0, w8 +; CHECK-NEXT: csel w0, w8, w9, gt ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i16 %b to i64 @@ -158,12 +135,9 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: 
sxtw x8, w0 -; CHECK-NEXT: sub x8, x8, w1, sxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w0, w9, w8, gt ; CHECK-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -176,13 +150,9 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x0, #63 -; CHECK-NEXT: asr x9, x1, #63 -; CHECK-NEXT: subs x10, x0, x1 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: sub x0, x9, x8 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x0, x9, x8, gt ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -195,13 +165,9 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x0, #63 -; CHECK-NEXT: asr x9, x1, #63 -; CHECK-NEXT: subs x10, x0, x1 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: sub x0, x9, x8 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x0, x9, x8, gt ; CHECK-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -214,17 +180,14 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128: ; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x1, #63 -; CHECK-NEXT: asr x9, x3, #63 -; CHECK-NEXT: subs x10, x0, x2 -; CHECK-NEXT: sbcs x11, x1, x3 -; CHECK-NEXT: sbcs xzr, x8, x9 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: eor x10, x11, x8 -; CHECK-NEXT: subs x0, x9, x8 -; CHECK-NEXT: sbc x1, x10, x8 +; CHECK-NEXT: cmp x2, x0 +; CHECK-NEXT: sbc x8, x3, x1 +; CHECK-NEXT: subs x9, x0, x2 +; CHECK-NEXT: sbc x10, x1, x3 +; CHECK-NEXT: subs x11, x2, x0 +; CHECK-NEXT: sbcs xzr, x3, x1 +; CHECK-NEXT: csel x0, x9, x11, lt +; CHECK-NEXT: csel x1, x10, x8, lt ; CHECK-NEXT: ret %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -237,17 +200,14 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_ext_i128_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: asr x8, x1, #63 -; CHECK-NEXT: asr x9, x3, #63 -; CHECK-NEXT: subs x10, x0, x2 -; CHECK-NEXT: sbcs x11, x1, x3 -; CHECK-NEXT: sbcs xzr, x8, x9 -; CHECK-NEXT: sbc x8, x8, x9 -; CHECK-NEXT: asr x8, x8, #63 -; CHECK-NEXT: eor x9, x10, x8 -; CHECK-NEXT: eor x10, x11, x8 -; CHECK-NEXT: subs x0, x9, x8 -; CHECK-NEXT: sbc x1, x10, x8 +; CHECK-NEXT: cmp x2, x0 +; CHECK-NEXT: sbc x8, x3, x1 +; CHECK-NEXT: subs x9, x0, x2 +; CHECK-NEXT: sbc x10, x1, x3 +; CHECK-NEXT: subs x11, x2, x0 +; CHECK-NEXT: sbcs xzr, x3, x1 +; CHECK-NEXT: csel x0, x9, x11, lt +; CHECK-NEXT: csel x1, x10, x8, lt ; CHECK-NEXT: ret %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -264,12 +224,10 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: sxtb w8, w1 -; CHECK-NEXT: sxtb w9, w0 -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w10, w9, w8, lt -; CHECK-NEXT: csel w8, w9, w8, gt -; CHECK-NEXT: sub w0, w8, w10 +; 
CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.smin.i8(i8 %a, i8 %b) %max = call i8 @llvm.smax.i8(i8 %a, i8 %b) @@ -280,12 +238,10 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: sxth w8, w1 -; CHECK-NEXT: sxth w9, w0 -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w10, w9, w8, lt -; CHECK-NEXT: csel w8, w9, w8, gt -; CHECK-NEXT: sub w0, w8, w10 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: sub w8, w8, w1, sxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.smin.i16(i16 %a, i16 %b) %max = call i16 @llvm.smax.i16(i16 %a, i16 %b) @@ -296,10 +252,9 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_minmax_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, w1 -; CHECK-NEXT: csel w8, w0, w1, lt -; CHECK-NEXT: csel w9, w0, w1, gt -; CHECK-NEXT: sub w0, w9, w8 +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w0, w9, w8, gt ; CHECK-NEXT: ret %min = call i32 @llvm.smin.i32(i32 %a, i32 %b) %max = call i32 @llvm.smax.i32(i32 %a, i32 %b) @@ -310,10 +265,9 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_minmax_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, x1 -; CHECK-NEXT: csel x8, x0, x1, lt -; CHECK-NEXT: csel x9, x0, x1, gt -; CHECK-NEXT: sub x0, x9, x8 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x0, x9, x8, gt ; CHECK-NEXT: ret %min = call i64 @llvm.smin.i64(i64 %a, i64 %b) %max = call i64 @llvm.smax.i64(i64 %a, i64 %b) @@ -324,16 +278,14 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_minmax_i128: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, x2 -; CHECK-NEXT: sbcs xzr, x1, x3 -; CHECK-NEXT: csel x8, x1, x3, lt -; CHECK-NEXT: csel x9, x0, x2, lt ; CHECK-NEXT: cmp x2, x0 +; CHECK-NEXT: sbc x8, x3, x1 +; CHECK-NEXT: subs x9, x0, x2 +; CHECK-NEXT: sbc x10, x1, x3 +; CHECK-NEXT: subs x11, x2, x0 ; CHECK-NEXT: sbcs xzr, x3, x1 -; CHECK-NEXT: csel x10, x0, x2, lt -; CHECK-NEXT: csel x11, x1, x3, lt -; CHECK-NEXT: subs x0, x10, x9 -; CHECK-NEXT: sbc x1, x11, x8 +; CHECK-NEXT: csel x0, x9, x11, lt +; CHECK-NEXT: csel x1, x10, x8, lt ; CHECK-NEXT: ret %min = call i128 @llvm.smin.i128(i128 %a, i128 %b) %max = call i128 @llvm.smax.i128(i128 %a, i128 %b) diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll index 637aac42fc66de..1613cbce4b8c8a 100644 --- a/llvm/test/CodeGen/AArch64/abdu-neg.ll +++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll @@ -8,13 +8,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: and w8, w0, #0xff +; CHECK-NEXT: sub w8, w8, w1, uxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 @@ -28,13 +25,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) 
nounwind { ; CHECK-LABEL: abd_ext_i8_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: and w8, w0, #0xff +; CHECK-NEXT: sub w8, w8, w1, uxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i16 %b to i64 @@ -48,13 +42,10 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: and w8, w0, #0xff +; CHECK-NEXT: sub w8, w8, w1, uxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 @@ -68,13 +59,10 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xffff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: and w8, w0, #0xffff +; CHECK-NEXT: sub w8, w8, w1, uxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 @@ -88,11 +76,10 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xffff -; CHECK-NEXT: sub x8, x8, w1, uxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: and w8, w0, #0xffff +; CHECK-NEXT: sub w9, w1, w8 +; CHECK-NEXT: subs w8, w8, w1 +; CHECK-NEXT: csel w8, w8, w9, hi ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = zext i16 %a to i64 @@ -107,13 +94,10 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xffff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi -; CHECK-NEXT: neg w0, w8 +; CHECK-NEXT: and w8, w0, #0xffff +; CHECK-NEXT: sub w8, w8, w1, uxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, pl ; CHECK-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 @@ -127,10 +111,9 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: sub x8, x8, w1, uxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w8, w9, w8, hi ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = zext i32 %a to i64 @@ -145,11 +128,10 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { 
; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: and w8, w1, #0xffff +; CHECK-NEXT: sub w9, w8, w0 +; CHECK-NEXT: subs w8, w0, w8 +; CHECK-NEXT: csel w8, w8, w9, hi ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = zext i32 %a to i64 @@ -164,10 +146,9 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: sub x8, x8, w1, uxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x8, x8, mi +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w8, w9, w8, hi ; CHECK-NEXT: neg w0, w8 ; CHECK-NEXT: ret %aext = zext i32 %a to i64 @@ -182,10 +163,10 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: subs x8, x0, x1 -; CHECK-NEXT: ngc x9, xzr -; CHECK-NEXT: eor x8, x8, x9 -; CHECK-NEXT: sub x0, x9, x8 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x8, x9, x8, hi +; CHECK-NEXT: neg x0, x8 ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -199,10 +180,10 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: subs x8, x0, x1 -; CHECK-NEXT: ngc x9, xzr -; CHECK-NEXT: eor x8, x8, x9 -; CHECK-NEXT: sub x0, x9, x8 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x8, x9, x8, hi +; CHECK-NEXT: neg x0, x8 ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -218,8 +199,8 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 ; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: ngcs xzr, xzr -; CHECK-NEXT: ngc x10, xzr +; CHECK-NEXT: cset w10, lo +; CHECK-NEXT: sbfx x10, x10, #0, #1 ; CHECK-NEXT: eor x8, x8, x10 ; CHECK-NEXT: eor x9, x9, x10 ; CHECK-NEXT: subs x8, x8, x10 @@ -241,8 +222,8 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 ; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: ngcs xzr, xzr -; CHECK-NEXT: ngc x10, xzr +; CHECK-NEXT: cset w10, lo +; CHECK-NEXT: sbfx x10, x10, #0, #1 ; CHECK-NEXT: eor x8, x8, x10 ; CHECK-NEXT: eor x9, x9, x10 ; CHECK-NEXT: subs x8, x8, x10 diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll index f70f095d7dabaa..2baa4f0ca43a7a 100644 --- a/llvm/test/CodeGen/AArch64/abdu.ll +++ b/llvm/test/CodeGen/AArch64/abdu.ll @@ -8,13 +8,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: and w8, w0, #0xff +; CHECK-NEXT: sub w8, w8, w1, uxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 @@ -27,13 +24,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; CHECK-LABEL: 
abd_ext_i8_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: and w8, w0, #0xff +; CHECK-NEXT: sub w8, w8, w1, uxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i16 %b to i64 @@ -46,13 +40,10 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_ext_i8_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxtb -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: and w8, w0, #0xff +; CHECK-NEXT: sub w8, w8, w1, uxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 @@ -65,13 +56,10 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xffff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: and w8, w0, #0xffff +; CHECK-NEXT: sub w8, w8, w1, uxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 @@ -84,12 +72,10 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xffff -; CHECK-NEXT: sub x8, x8, w1, uxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: and w8, w0, #0xffff +; CHECK-NEXT: sub w9, w1, w8 +; CHECK-NEXT: subs w8, w8, w1 +; CHECK-NEXT: csel w0, w8, w9, hi ; CHECK-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i32 %b to i64 @@ -102,13 +88,10 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i16_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: and x8, x0, #0xffff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: and w8, w0, #0xffff +; CHECK-NEXT: sub w8, w8, w1, uxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 @@ -121,11 +104,9 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: sub x8, x8, w1, uxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w0, w9, w8, hi ; 
CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -138,12 +119,10 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: sub x8, x8, w1, uxth -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: and w8, w1, #0xffff +; CHECK-NEXT: sub w9, w8, w0 +; CHECK-NEXT: subs w8, w0, w8 +; CHECK-NEXT: csel w0, w8, w9, hi ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i16 %b to i64 @@ -156,11 +135,9 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_ext_i32_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: sub x8, x8, w1, uxtw -; CHECK-NEXT: cmp x8, #0 -; CHECK-NEXT: cneg x0, x8, mi -; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0 +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w0, w9, w8, hi ; CHECK-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -173,10 +150,9 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: subs x8, x0, x1 -; CHECK-NEXT: ngc x9, xzr -; CHECK-NEXT: eor x8, x8, x9 -; CHECK-NEXT: sub x0, x8, x9 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x0, x9, x8, hi ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -189,10 +165,9 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_ext_i64_undef: ; CHECK: // %bb.0: -; CHECK-NEXT: subs x8, x0, x1 -; CHECK-NEXT: ngc x9, xzr -; CHECK-NEXT: eor x8, x8, x9 -; CHECK-NEXT: sub x0, x8, x9 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x0, x9, x8, hi ; CHECK-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -207,8 +182,8 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 ; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: ngcs xzr, xzr -; CHECK-NEXT: ngc x10, xzr +; CHECK-NEXT: cset w10, lo +; CHECK-NEXT: sbfx x10, x10, #0, #1 ; CHECK-NEXT: eor x8, x8, x10 ; CHECK-NEXT: eor x9, x9, x10 ; CHECK-NEXT: subs x0, x8, x10 @@ -227,8 +202,8 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; CHECK: // %bb.0: ; CHECK-NEXT: subs x8, x0, x2 ; CHECK-NEXT: sbcs x9, x1, x3 -; CHECK-NEXT: ngcs xzr, xzr -; CHECK-NEXT: ngc x10, xzr +; CHECK-NEXT: cset w10, lo +; CHECK-NEXT: sbfx x10, x10, #0, #1 ; CHECK-NEXT: eor x8, x8, x10 ; CHECK-NEXT: eor x9, x9, x10 ; CHECK-NEXT: subs x0, x8, x10 @@ -249,12 +224,10 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; CHECK-LABEL: abd_minmax_i8: ; CHECK: // %bb.0: -; CHECK-NEXT: and w8, w1, #0xff -; CHECK-NEXT: and w9, w0, #0xff -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w10, w9, w8, lo -; CHECK-NEXT: csel w8, w9, w8, hi -; CHECK-NEXT: sub w0, w8, w10 +; CHECK-NEXT: and w8, w0, #0xff +; CHECK-NEXT: sub w8, w8, w1, uxtb +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i8 @llvm.umin.i8(i8 %a, i8 %b) %max = call i8 @llvm.umax.i8(i8 %a, i8 %b) @@ -265,12 +238,10 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { 
define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; CHECK-LABEL: abd_minmax_i16: ; CHECK: // %bb.0: -; CHECK-NEXT: and w8, w1, #0xffff -; CHECK-NEXT: and w9, w0, #0xffff -; CHECK-NEXT: cmp w9, w8 -; CHECK-NEXT: csel w10, w9, w8, lo -; CHECK-NEXT: csel w8, w9, w8, hi -; CHECK-NEXT: sub w0, w8, w10 +; CHECK-NEXT: and w8, w0, #0xffff +; CHECK-NEXT: sub w8, w8, w1, uxth +; CHECK-NEXT: cmp w8, #0 +; CHECK-NEXT: cneg w0, w8, mi ; CHECK-NEXT: ret %min = call i16 @llvm.umin.i16(i16 %a, i16 %b) %max = call i16 @llvm.umax.i16(i16 %a, i16 %b) @@ -281,10 +252,9 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: abd_minmax_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp w0, w1 -; CHECK-NEXT: csel w8, w0, w1, lo -; CHECK-NEXT: csel w9, w0, w1, hi -; CHECK-NEXT: sub w0, w9, w8 +; CHECK-NEXT: sub w8, w1, w0 +; CHECK-NEXT: subs w9, w0, w1 +; CHECK-NEXT: csel w0, w9, w8, hi ; CHECK-NEXT: ret %min = call i32 @llvm.umin.i32(i32 %a, i32 %b) %max = call i32 @llvm.umax.i32(i32 %a, i32 %b) @@ -295,10 +265,9 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; CHECK-LABEL: abd_minmax_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, x1 -; CHECK-NEXT: csel x8, x0, x1, lo -; CHECK-NEXT: csel x9, x0, x1, hi -; CHECK-NEXT: sub x0, x9, x8 +; CHECK-NEXT: sub x8, x1, x0 +; CHECK-NEXT: subs x9, x0, x1 +; CHECK-NEXT: csel x0, x9, x8, hi ; CHECK-NEXT: ret %min = call i64 @llvm.umin.i64(i64 %a, i64 %b) %max = call i64 @llvm.umax.i64(i64 %a, i64 %b) @@ -309,16 +278,14 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind { ; CHECK-LABEL: abd_minmax_i128: ; CHECK: // %bb.0: -; CHECK-NEXT: cmp x0, x2 -; CHECK-NEXT: sbcs xzr, x1, x3 -; CHECK-NEXT: csel x8, x1, x3, lo -; CHECK-NEXT: csel x9, x0, x2, lo -; CHECK-NEXT: cmp x2, x0 -; CHECK-NEXT: sbcs xzr, x3, x1 -; CHECK-NEXT: csel x10, x0, x2, lo -; CHECK-NEXT: csel x11, x1, x3, lo -; CHECK-NEXT: subs x0, x10, x9 -; CHECK-NEXT: sbc x1, x11, x8 +; CHECK-NEXT: subs x8, x0, x2 +; CHECK-NEXT: sbcs x9, x1, x3 +; CHECK-NEXT: cset w10, lo +; CHECK-NEXT: sbfx x10, x10, #0, #1 +; CHECK-NEXT: eor x8, x8, x10 +; CHECK-NEXT: eor x9, x9, x10 +; CHECK-NEXT: subs x0, x8, x10 +; CHECK-NEXT: sbc x1, x9, x10 ; CHECK-NEXT: ret %min = call i128 @llvm.umin.i128(i128 %a, i128 %b) %max = call i128 @llvm.umax.i128(i128 %a, i128 %b) diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll index 62a79e3547b297..48afcc5c3dd2b6 100644 --- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll @@ -1799,28 +1799,14 @@ define <2 x i64> @uabd_i32(<2 x i32> %a, <2 x i32> %b) { define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: uabd_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: mov.d x8, v0[1] -; CHECK-NEXT: mov.d x9, v1[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x12, d1 -; CHECK-NEXT: asr x14, x10, #63 -; CHECK-NEXT: asr x11, x8, #63 -; CHECK-NEXT: asr x13, x9, #63 -; CHECK-NEXT: asr x15, x12, #63 -; CHECK-NEXT: subs x8, x8, x9 -; CHECK-NEXT: sbc x9, x11, x13 -; CHECK-NEXT: subs x10, x10, x12 -; CHECK-NEXT: sbc x11, x14, x15 -; CHECK-NEXT: asr x13, x9, #63 -; CHECK-NEXT: asr x12, x11, #63 -; CHECK-NEXT: eor x8, x8, x13 -; CHECK-NEXT: eor x9, x9, x13 -; CHECK-NEXT: eor x10, x10, x12 -; CHECK-NEXT: eor x11, x11, x12 -; CHECK-NEXT: subs x0, x10, x12 -; CHECK-NEXT: sbc x1, x11, x12 -; CHECK-NEXT: subs x2, x8, x13 -; CHECK-NEXT: sbc x3, x9, 
x13 +; CHECK-NEXT: cmgt.2d v2, v0, v1 +; CHECK-NEXT: sub.2d v0, v0, v1 +; CHECK-NEXT: mov x1, xzr +; CHECK-NEXT: mov x3, xzr +; CHECK-NEXT: eor.16b v0, v0, v2 +; CHECK-NEXT: sub.2d v0, v2, v0 +; CHECK-NEXT: mov.d x2, v0[1] +; CHECK-NEXT: fmov x0, d0 ; CHECK-NEXT: ret %aext = sext <2 x i64> %a to <2 x i128> %bext = sext <2 x i64> %b to <2 x i128> diff --git a/llvm/test/CodeGen/AArch64/neon-abd.ll b/llvm/test/CodeGen/AArch64/neon-abd.ll index 18364bdecee026..314edd2fc81a70 100644 --- a/llvm/test/CodeGen/AArch64/neon-abd.ll +++ b/llvm/test/CodeGen/AArch64/neon-abd.ll @@ -49,10 +49,10 @@ define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) #0 { define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-LABEL: sabd_4h_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: shl v0.4h, v0.4h, #8 ; CHECK-NEXT: shl v1.4h, v1.4h, #8 -; CHECK-NEXT: sshr v0.4h, v0.4h, #8 +; CHECK-NEXT: shl v0.4h, v0.4h, #8 ; CHECK-NEXT: sshr v1.4h, v1.4h, #8 +; CHECK-NEXT: sshr v0.4h, v0.4h, #8 ; CHECK-NEXT: sabd v0.4h, v0.4h, v1.4h ; CHECK-NEXT: ret %a.sext = sext <4 x i8> %a to <4 x i16> @@ -103,10 +103,10 @@ define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) #0 { define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) #0 { ; CHECK-LABEL: sabd_2s_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: shl v0.2s, v0.2s, #16 ; CHECK-NEXT: shl v1.2s, v1.2s, #16 -; CHECK-NEXT: sshr v0.2s, v0.2s, #16 +; CHECK-NEXT: shl v0.2s, v0.2s, #16 ; CHECK-NEXT: sshr v1.2s, v1.2s, #16 +; CHECK-NEXT: sshr v0.2s, v0.2s, #16 ; CHECK-NEXT: sabd v0.2s, v0.2s, v1.2s ; CHECK-NEXT: ret %a.sext = sext <2 x i16> %a to <2 x i32> @@ -144,27 +144,10 @@ define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) #0 { define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) #0 { ; CHECK-LABEL: sabd_2d: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, v0.d[1] -; CHECK-NEXT: mov x9, v1.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x12, d1 -; CHECK-NEXT: asr x14, x10, #63 -; CHECK-NEXT: asr x11, x8, #63 -; CHECK-NEXT: asr x13, x9, #63 -; CHECK-NEXT: asr x15, x12, #63 -; CHECK-NEXT: subs x8, x8, x9 -; CHECK-NEXT: sbc x9, x11, x13 -; CHECK-NEXT: subs x10, x10, x12 -; CHECK-NEXT: sbc x11, x14, x15 -; CHECK-NEXT: asr x9, x9, #63 -; CHECK-NEXT: asr x11, x11, #63 -; CHECK-NEXT: eor x8, x8, x9 -; CHECK-NEXT: eor x10, x10, x11 -; CHECK-NEXT: sub x8, x8, x9 -; CHECK-NEXT: sub x10, x10, x11 -; CHECK-NEXT: fmov d1, x8 -; CHECK-NEXT: fmov d0, x10 -; CHECK-NEXT: mov v0.d[1], v1.d[0] +; CHECK-NEXT: cmgt v2.2d, v0.2d, v1.2d +; CHECK-NEXT: sub v0.2d, v0.2d, v1.2d +; CHECK-NEXT: eor v0.16b, v0.16b, v2.16b +; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d ; CHECK-NEXT: ret %a.sext = sext <2 x i64> %a to <2 x i128> %b.sext = sext <2 x i64> %b to <2 x i128> @@ -232,8 +215,8 @@ define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) #0 { define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) #0 { ; CHECK-LABEL: uabd_4h_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: bic v0.4h, #255, lsl #8 ; CHECK-NEXT: bic v1.4h, #255, lsl #8 +; CHECK-NEXT: bic v0.4h, #255, lsl #8 ; CHECK-NEXT: uabd v0.4h, v0.4h, v1.4h ; CHECK-NEXT: ret %a.zext = zext <4 x i8> %a to <4 x i16> @@ -285,8 +268,8 @@ define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) #0 { ; CHECK-LABEL: uabd_2s_promoted_ops: ; CHECK: // %bb.0: ; CHECK-NEXT: movi d2, #0x00ffff0000ffff -; CHECK-NEXT: and v0.8b, v0.8b, v2.8b ; CHECK-NEXT: and v1.8b, v1.8b, v2.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b ; CHECK-NEXT: uabd v0.2s, v0.2s, v1.2s ; CHECK-NEXT: ret %a.zext = zext <2 x i16> %a to <2 x 
i32> @@ -324,21 +307,9 @@ define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) #0 { define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) #0 { ; CHECK-LABEL: uabd_2d: ; CHECK: // %bb.0: -; CHECK-NEXT: mov x8, v0.d[1] -; CHECK-NEXT: mov x9, v1.d[1] -; CHECK-NEXT: fmov x10, d0 -; CHECK-NEXT: fmov x11, d1 -; CHECK-NEXT: subs x8, x8, x9 -; CHECK-NEXT: ngc x9, xzr -; CHECK-NEXT: subs x10, x10, x11 -; CHECK-NEXT: ngc x11, xzr -; CHECK-NEXT: eor x8, x8, x9 -; CHECK-NEXT: eor x10, x10, x11 -; CHECK-NEXT: sub x8, x8, x9 -; CHECK-NEXT: sub x10, x10, x11 -; CHECK-NEXT: fmov d1, x8 -; CHECK-NEXT: fmov d0, x10 -; CHECK-NEXT: mov v0.d[1], v1.d[0] +; CHECK-NEXT: uqsub v2.2d, v1.2d, v0.2d +; CHECK-NEXT: uqsub v0.2d, v0.2d, v1.2d +; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-NEXT: ret %a.zext = zext <2 x i64> %a to <2 x i128> %b.zext = zext <2 x i64> %b to <2 x i128> @@ -482,9 +453,8 @@ define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) { ; CHECK-LABEL: smaxmin_v2i64: ; CHECK: // %bb.0: ; CHECK-NEXT: cmgt v2.2d, v0.2d, v1.2d -; CHECK-NEXT: cmgt v3.2d, v1.2d, v0.2d -; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b -; CHECK-NEXT: bif v0.16b, v1.16b, v3.16b +; CHECK-NEXT: sub v0.2d, v0.2d, v1.2d +; CHECK-NEXT: eor v0.16b, v0.16b, v2.16b ; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d ; CHECK-NEXT: ret %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1) @@ -529,11 +499,9 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) { define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) { ; CHECK-LABEL: umaxmin_v2i64: ; CHECK: // %bb.0: -; CHECK-NEXT: cmhi v2.2d, v0.2d, v1.2d -; CHECK-NEXT: cmhi v3.2d, v1.2d, v0.2d -; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b -; CHECK-NEXT: bif v0.16b, v1.16b, v3.16b -; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d +; CHECK-NEXT: uqsub v2.2d, v1.2d, v0.2d +; CHECK-NEXT: uqsub v0.2d, v0.2d, v1.2d +; CHECK-NEXT: orr v0.16b, v0.16b, v2.16b ; CHECK-NEXT: ret %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1) %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1) diff --git a/llvm/test/CodeGen/AArch64/sve-aba.ll b/llvm/test/CodeGen/AArch64/sve-aba.ll index 6859f7d017044b..ffb3e2d658364d 100644 --- a/llvm/test/CodeGen/AArch64/sve-aba.ll +++ b/llvm/test/CodeGen/AArch64/sve-aba.ll @@ -24,9 +24,10 @@ define @saba_b( %a, %b, define @saba_b_promoted_ops( %a, %b, %c) #0 { ; CHECK-LABEL: saba_b_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: mov z1.b, p0/z, #-1 // =0xffffffffffffffff -; CHECK-NEXT: mov z2.b, p1/z, #-1 // =0xffffffffffffffff -; CHECK-NEXT: saba z0.b, z1.b, z2.b +; CHECK-NEXT: ptrue p2.b +; CHECK-NEXT: mov z1.b, #1 // =0x1 +; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b +; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: ret %b.sext = sext %b to %c.sext = sext %c to @@ -75,8 +76,8 @@ define @saba_h_promoted_ops( %a, %b to @@ -126,8 +127,8 @@ define @saba_s_promoted_ops( %a, %b to @@ -177,8 +178,8 @@ define @saba_d_promoted_ops( %a, %b to @@ -231,9 +232,10 @@ define @uaba_b( %a, %b, define @uaba_b_promoted_ops( %a, %b, %c) #0 { ; CHECK-LABEL: uaba_b_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1 -; CHECK-NEXT: mov z2.b, p1/z, #1 // =0x1 -; CHECK-NEXT: uaba z0.b, z1.b, z2.b +; CHECK-NEXT: ptrue p2.b +; CHECK-NEXT: mov z1.b, #1 // =0x1 +; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b +; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: ret %b.zext = zext %b to %c.zext = zext %c to @@ -281,8 +283,8 @@ define @uaba_h( %a, %b, define @uaba_h_promoted_ops( %a, %b, %c) #0 { ; CHECK-LABEL: 
uaba_h_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: and z1.h, z1.h, #0xff ; CHECK-NEXT: and z2.h, z2.h, #0xff +; CHECK-NEXT: and z1.h, z1.h, #0xff ; CHECK-NEXT: uaba z0.h, z1.h, z2.h ; CHECK-NEXT: ret %b.zext = zext %b to @@ -331,8 +333,8 @@ define @uaba_s( %a, %b, define @uaba_s_promoted_ops( %a, %b, %c) #0 { ; CHECK-LABEL: uaba_s_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: and z1.s, z1.s, #0xffff ; CHECK-NEXT: and z2.s, z2.s, #0xffff +; CHECK-NEXT: and z1.s, z1.s, #0xffff ; CHECK-NEXT: uaba z0.s, z1.s, z2.s ; CHECK-NEXT: ret %b.zext = zext %b to @@ -381,8 +383,8 @@ define @uaba_d( %a, %b, define @uaba_d_promoted_ops( %a, %b, %c) #0 { ; CHECK-LABEL: uaba_d_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: and z1.d, z1.d, #0xffffffff ; CHECK-NEXT: and z2.d, z2.d, #0xffffffff +; CHECK-NEXT: and z1.d, z1.d, #0xffffffff ; CHECK-NEXT: uaba z0.d, z1.d, z2.d ; CHECK-NEXT: ret %b.zext = zext %b to diff --git a/llvm/test/CodeGen/AArch64/sve-abd.ll b/llvm/test/CodeGen/AArch64/sve-abd.ll index 7b492229e3d23d..72790155d046fc 100644 --- a/llvm/test/CodeGen/AArch64/sve-abd.ll +++ b/llvm/test/CodeGen/AArch64/sve-abd.ll @@ -24,10 +24,9 @@ define @sabd_b( %a, %b) define @sabd_b_promoted_ops( %a, %b) #0 { ; CHECK-LABEL: sabd_b_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: mov z0.b, p0/z, #-1 // =0xffffffffffffffff -; CHECK-NEXT: mov z1.b, p1/z, #-1 // =0xffffffffffffffff -; CHECK-NEXT: ptrue p0.b -; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: ptrue p2.b +; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b +; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1 ; CHECK-NEXT: ret %a.sext = sext %a to %b.sext = sext %b to @@ -54,8 +53,8 @@ define @sabd_h_promoted_ops( %a, %a to @@ -83,8 +82,8 @@ define @sabd_s_promoted_ops( %a, %a to @@ -112,8 +111,8 @@ define @sabd_d_promoted_ops( %a, %a to @@ -144,10 +143,9 @@ define @uabd_b( %a, %b) define @uabd_b_promoted_ops( %a, %b) #0 { ; CHECK-LABEL: uabd_b_promoted_ops: ; CHECK: // %bb.0: +; CHECK-NEXT: ptrue p2.b +; CHECK-NEXT: eor p0.b, p2/z, p0.b, p1.b ; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1 -; CHECK-NEXT: mov z1.b, p1/z, #1 // =0x1 -; CHECK-NEXT: ptrue p0.b -; CHECK-NEXT: uabd z0.b, p0/m, z0.b, z1.b ; CHECK-NEXT: ret %a.zext = zext %a to %b.zext = zext %b to @@ -173,8 +171,8 @@ define @uabd_h( %a, %b) define @uabd_h_promoted_ops( %a, %b) #0 { ; CHECK-LABEL: uabd_h_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: and z0.h, z0.h, #0xff ; CHECK-NEXT: and z1.h, z1.h, #0xff +; CHECK-NEXT: and z0.h, z0.h, #0xff ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: uabd z0.h, p0/m, z0.h, z1.h ; CHECK-NEXT: ret @@ -202,8 +200,8 @@ define @uabd_s( %a, %b) define @uabd_s_promoted_ops( %a, %b) #0 { ; CHECK-LABEL: uabd_s_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: and z0.s, z0.s, #0xffff ; CHECK-NEXT: and z1.s, z1.s, #0xffff +; CHECK-NEXT: and z0.s, z0.s, #0xffff ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: uabd z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: ret @@ -231,8 +229,8 @@ define @uabd_d( %a, %b) define @uabd_d_promoted_ops( %a, %b) #0 { ; CHECK-LABEL: uabd_d_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: and z0.d, z0.d, #0xffffffff ; CHECK-NEXT: and z1.d, z1.d, #0xffffffff +; CHECK-NEXT: and z0.d, z0.d, #0xffffffff ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: uabd z0.d, p0/m, z0.d, z1.d ; CHECK-NEXT: ret @@ -265,8 +263,8 @@ define @uabd_non_matching_extension( %a, @uabd_non_matching_promoted_ops( %a, %b) #0 { ; CHECK-LABEL: uabd_non_matching_promoted_ops: ; CHECK: // %bb.0: -; CHECK-NEXT: and z0.s, z0.s, #0xff ; CHECK-NEXT: and z1.s, z1.s, #0xffff +; CHECK-NEXT: and z0.s, z0.s, #0xff ; CHECK-NEXT: ptrue 
p0.s ; CHECK-NEXT: uabd z0.s, p0/m, z0.s, z1.s ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll index 35a5210d1c790b..e4309a29193637 100644 --- a/llvm/test/CodeGen/AMDGPU/sad.ll +++ b/llvm/test/CodeGen/AMDGPU/sad.ll @@ -86,9 +86,9 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(ptr addrspace(1) %out, i ; GCN-NEXT: s_add_u32 s16, s16, s13 ; GCN-NEXT: s_addc_u32 s17, s17, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_max_u32 s3, s0, s1 -; GCN-NEXT: s_min_u32 s0, s0, s1 -; GCN-NEXT: s_sub_i32 s0, s3, s0 +; GCN-NEXT: s_min_u32 s3, s0, s1 +; GCN-NEXT: s_max_u32 s0, s0, s1 +; GCN-NEXT: s_sub_i32 s0, s0, s3 ; GCN-NEXT: v_mov_b32_e32 v0, s4 ; GCN-NEXT: v_mov_b32_e32 v2, s0 ; GCN-NEXT: s_add_i32 s0, s0, s2 diff --git a/llvm/test/CodeGen/ARM/neon_vabd.ll b/llvm/test/CodeGen/ARM/neon_vabd.ll index cdfc48468e0446..8a268d46304cf9 100644 --- a/llvm/test/CodeGen/ARM/neon_vabd.ll +++ b/llvm/test/CodeGen/ARM/neon_vabd.ll @@ -142,35 +142,30 @@ define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) { define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: sabd_2d: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, r5, r11, lr} -; CHECK-NEXT: push {r4, r5, r11, lr} -; CHECK-NEXT: vmov lr, r1, d1 -; CHECK-NEXT: vmov r2, r3, d3 -; CHECK-NEXT: vmov r12, r0, d0 -; CHECK-NEXT: subs lr, lr, r2 -; CHECK-NEXT: asr r4, r1, #31 -; CHECK-NEXT: sbcs r1, r1, r3 -; CHECK-NEXT: sbcs r2, r4, r3, asr #31 -; CHECK-NEXT: vmov r2, r5, d2 -; CHECK-NEXT: sbc r3, r4, r3, asr #31 -; CHECK-NEXT: eor r4, lr, r3, asr #31 -; CHECK-NEXT: eor r1, r1, r3, asr #31 -; CHECK-NEXT: subs r4, r4, r3, asr #31 -; CHECK-NEXT: sbc lr, r1, r3, asr #31 -; CHECK-NEXT: asr r3, r0, #31 -; CHECK-NEXT: vmov.32 d1[0], r4 -; CHECK-NEXT: subs r2, r12, r2 -; CHECK-NEXT: sbcs r0, r0, r5 -; CHECK-NEXT: sbcs r1, r3, r5, asr #31 -; CHECK-NEXT: sbc r1, r3, r5, asr #31 -; CHECK-NEXT: eor r2, r2, r1, asr #31 -; CHECK-NEXT: eor r0, r0, r1, asr #31 -; CHECK-NEXT: subs r2, r2, r1, asr #31 -; CHECK-NEXT: sbc r0, r0, r1, asr #31 -; CHECK-NEXT: vmov.32 d0[0], r2 -; CHECK-NEXT: vmov.32 d1[1], lr -; CHECK-NEXT: vmov.32 d0[1], r0 -; CHECK-NEXT: pop {r4, r5, r11, pc} +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: vmov r0, r12, d0 +; CHECK-NEXT: mov r6, #0 +; CHECK-NEXT: vmov r2, r3, d2 +; CHECK-NEXT: vmov r1, lr, d1 +; CHECK-NEXT: vmov r4, r5, d3 +; CHECK-NEXT: vsub.i64 q8, q0, q1 +; CHECK-NEXT: subs r0, r2, r0 +; CHECK-NEXT: sbcs r0, r3, r12 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: movwlt r0, #1 +; CHECK-NEXT: subs r1, r4, r1 +; CHECK-NEXT: sbcs r1, r5, lr +; CHECK-NEXT: movwlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: mvnne r6, #0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vdup.32 d19, r6 +; CHECK-NEXT: mvnne r0, #0 +; CHECK-NEXT: vdup.32 d18, r0 +; CHECK-NEXT: veor q8, q8, q9 +; CHECK-NEXT: vsub.i64 q0, q9, q8 +; CHECK-NEXT: pop {r4, r5, r6, pc} %a.sext = sext <2 x i64> %a to <2 x i128> %b.sext = sext <2 x i64> %b to <2 x i128> %sub = sub <2 x i128> %a.sext, %b.sext @@ -329,34 +324,10 @@ define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) { define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: uabd_2d: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, r5, r6, lr} -; CHECK-NEXT: push {r4, r5, r6, lr} -; CHECK-NEXT: vmov r0, r12, d3 -; CHECK-NEXT: mov r1, #0 -; CHECK-NEXT: vmov r2, r3, d1 -; CHECK-NEXT: vmov lr, r6, d2 -; CHECK-NEXT: vmov r4, r5, d0 -; CHECK-NEXT: subs r0, r2, r0 -; CHECK-NEXT: sbcs r2, r3, r12 -; 
CHECK-NEXT: sbcs r3, r1, #0 -; CHECK-NEXT: sbc r3, r1, #0 -; CHECK-NEXT: eor r0, r0, r3 -; CHECK-NEXT: eor r2, r2, r3 -; CHECK-NEXT: subs r0, r0, r3 -; CHECK-NEXT: sbc r2, r2, r3 -; CHECK-NEXT: subs r3, r4, lr -; CHECK-NEXT: sbcs r6, r5, r6 -; CHECK-NEXT: vmov.32 d1[0], r0 -; CHECK-NEXT: sbcs r5, r1, #0 -; CHECK-NEXT: sbc r1, r1, #0 -; CHECK-NEXT: eor r3, r3, r1 -; CHECK-NEXT: subs r0, r3, r1 -; CHECK-NEXT: vmov.32 d0[0], r0 -; CHECK-NEXT: eor r0, r6, r1 -; CHECK-NEXT: sbc r0, r0, r1 -; CHECK-NEXT: vmov.32 d1[1], r2 -; CHECK-NEXT: vmov.32 d0[1], r0 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: vqsub.u64 q8, q1, q0 +; CHECK-NEXT: vqsub.u64 q9, q0, q1 +; CHECK-NEXT: vorr q0, q9, q8 +; CHECK-NEXT: bx lr %a.zext = zext <2 x i64> %a to <2 x i128> %b.zext = zext <2 x i64> %b to <2 x i128> %sub = sub <2 x i128> %a.zext, %b.zext @@ -502,56 +473,30 @@ define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) { define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) { ; CHECK-LABEL: smaxmin_v2i64: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} -; CHECK-NEXT: push {r4, r5, r6, r7, r8, lr} -; CHECK-NEXT: vmov r1, r0, d3 -; CHECK-NEXT: mov r12, #0 -; CHECK-NEXT: vmov r4, lr, d1 -; CHECK-NEXT: vmov r6, r8, d0 -; CHECK-NEXT: subs r2, r4, r1 -; CHECK-NEXT: mov r3, r0 -; CHECK-NEXT: sbcs r2, lr, r0 -; CHECK-NEXT: mov r2, #0 -; CHECK-NEXT: movwlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: mov r2, r1 -; CHECK-NEXT: movne r3, lr -; CHECK-NEXT: movne r2, r4 -; CHECK-NEXT: vmov.32 d17[0], r2 -; CHECK-NEXT: vmov.32 d17[1], r3 +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: vmov r0, r12, d0 +; CHECK-NEXT: mov r6, #0 ; CHECK-NEXT: vmov r2, r3, d2 -; CHECK-NEXT: subs r5, r2, r6 -; CHECK-NEXT: sbcs r5, r3, r8 -; CHECK-NEXT: mov r7, r2 -; CHECK-NEXT: mov r5, #0 -; CHECK-NEXT: movwlt r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: movne r7, r6 -; CHECK-NEXT: vmov.32 d18[0], r7 -; CHECK-NEXT: subs r7, r1, r4 -; CHECK-NEXT: sbcs r7, r0, lr -; CHECK-NEXT: mov r7, #0 -; CHECK-NEXT: movwlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: movne r1, r4 -; CHECK-NEXT: vmov.32 d19[0], r1 -; CHECK-NEXT: subs r1, r6, r2 -; CHECK-NEXT: sbcs r1, r8, r3 -; CHECK-NEXT: movwlt r12, #1 -; CHECK-NEXT: cmp r12, #0 -; CHECK-NEXT: movne r2, r6 -; CHECK-NEXT: mov r1, r3 -; CHECK-NEXT: vmov.32 d16[0], r2 -; CHECK-NEXT: movne r1, r8 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: movne r0, lr -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: vmov.32 d16[1], r1 -; CHECK-NEXT: movne r3, r8 -; CHECK-NEXT: vmov.32 d19[1], r0 -; CHECK-NEXT: vmov.32 d18[1], r3 +; CHECK-NEXT: vmov r1, lr, d1 +; CHECK-NEXT: vmov r4, r5, d3 +; CHECK-NEXT: vsub.i64 q8, q0, q1 +; CHECK-NEXT: subs r0, r2, r0 +; CHECK-NEXT: sbcs r0, r3, r12 +; CHECK-NEXT: mov r0, #0 +; CHECK-NEXT: movwlt r0, #1 +; CHECK-NEXT: subs r1, r4, r1 +; CHECK-NEXT: sbcs r1, r5, lr +; CHECK-NEXT: movwlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: mvnne r6, #0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vdup.32 d19, r6 +; CHECK-NEXT: mvnne r0, #0 +; CHECK-NEXT: vdup.32 d18, r0 +; CHECK-NEXT: veor q8, q8, q9 ; CHECK-NEXT: vsub.i64 q0, q9, q8 -; CHECK-NEXT: pop {r4, r5, r6, r7, r8, pc} +; CHECK-NEXT: pop {r4, r5, r6, pc} %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1) %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1) %sub = sub <2 x i64> %a, %b @@ -594,11 +539,9 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) { define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) { ; CHECK-LABEL: 
umaxmin_v2i64: ; CHECK: @ %bb.0: -; CHECK-NEXT: vqsub.u64 q8, q0, q1 -; CHECK-NEXT: vqsub.u64 q9, q1, q0 -; CHECK-NEXT: vsub.i64 q8, q8, q0 -; CHECK-NEXT: vadd.i64 q9, q0, q9 -; CHECK-NEXT: vadd.i64 q0, q9, q8 +; CHECK-NEXT: vqsub.u64 q8, q1, q0 +; CHECK-NEXT: vqsub.u64 q9, q0, q1 +; CHECK-NEXT: vorr q0, q9, q8 ; CHECK-NEXT: bx lr %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1) %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1) diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll index 7a6640fea2d1e4..41bd93e84697e7 100644 --- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll +++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll @@ -172,82 +172,13 @@ entry: ret <16 x i8> %3 } -; FIXME: This does not produce the ISD::ABS that we are looking for. -; We should fix the missing canonicalization. -; We do manage to find the word version of ABS but not the halfword. -; Threfore, we end up doing more work than is required with a pair of abs for word -; instead of just one for the halfword. define <8 x i16> @sub_absv_16_ext(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr { -; CHECK-PWR9-LABEL: sub_absv_16_ext: -; CHECK-PWR9: # %bb.0: # %entry -; CHECK-PWR9-NEXT: vmrghh v4, v2, v2 -; CHECK-PWR9-NEXT: vmrglh v2, v2, v2 -; CHECK-PWR9-NEXT: vmrghh v5, v3, v3 -; CHECK-PWR9-NEXT: vmrglh v3, v3, v3 -; CHECK-PWR9-NEXT: vextsh2w v2, v2 -; CHECK-PWR9-NEXT: vextsh2w v3, v3 -; CHECK-PWR9-NEXT: vextsh2w v4, v4 -; CHECK-PWR9-NEXT: vextsh2w v5, v5 -; CHECK-PWR9-NEXT: xvnegsp v3, v3 -; CHECK-PWR9-NEXT: xvnegsp v2, v2 -; CHECK-PWR9-NEXT: xvnegsp v4, v4 -; CHECK-PWR9-NEXT: vabsduw v2, v2, v3 -; CHECK-PWR9-NEXT: xvnegsp v3, v5 -; CHECK-PWR9-NEXT: vabsduw v3, v4, v3 -; CHECK-PWR9-NEXT: vpkuwum v2, v3, v2 -; CHECK-PWR9-NEXT: blr -; -; CHECK-PWR8-LABEL: sub_absv_16_ext: -; CHECK-PWR8: # %bb.0: # %entry -; CHECK-PWR8-NEXT: vspltisw v4, 8 -; CHECK-PWR8-NEXT: vmrglh v5, v2, v2 -; CHECK-PWR8-NEXT: vadduwm v4, v4, v4 -; CHECK-PWR8-NEXT: vmrghh v2, v2, v2 -; CHECK-PWR8-NEXT: vmrglh v0, v3, v3 -; CHECK-PWR8-NEXT: vmrghh v3, v3, v3 -; CHECK-PWR8-NEXT: vslw v5, v5, v4 -; CHECK-PWR8-NEXT: vslw v2, v2, v4 -; CHECK-PWR8-NEXT: vslw v0, v0, v4 -; CHECK-PWR8-NEXT: vslw v3, v3, v4 -; CHECK-PWR8-NEXT: vsraw v5, v5, v4 -; CHECK-PWR8-NEXT: vsraw v2, v2, v4 -; CHECK-PWR8-NEXT: vsraw v0, v0, v4 -; CHECK-PWR8-NEXT: vsraw v3, v3, v4 -; CHECK-PWR8-NEXT: xxlxor v4, v4, v4 -; CHECK-PWR8-NEXT: vsubuwm v2, v2, v3 -; CHECK-PWR8-NEXT: vsubuwm v3, v5, v0 -; CHECK-PWR8-NEXT: vsubuwm v5, v4, v3 -; CHECK-PWR8-NEXT: vsubuwm v4, v4, v2 -; CHECK-PWR8-NEXT: vmaxsw v3, v3, v5 -; CHECK-PWR8-NEXT: vmaxsw v2, v2, v4 -; CHECK-PWR8-NEXT: vpkuwum v2, v2, v3 -; CHECK-PWR8-NEXT: blr -; -; CHECK-PWR7-LABEL: sub_absv_16_ext: -; CHECK-PWR7: # %bb.0: # %entry -; CHECK-PWR7-NEXT: vspltisw v4, 8 -; CHECK-PWR7-NEXT: vmrglh v5, v2, v2 -; CHECK-PWR7-NEXT: vmrghh v2, v2, v2 -; CHECK-PWR7-NEXT: vmrglh v0, v3, v3 -; CHECK-PWR7-NEXT: vmrghh v3, v3, v3 -; CHECK-PWR7-NEXT: vadduwm v4, v4, v4 -; CHECK-PWR7-NEXT: vslw v5, v5, v4 -; CHECK-PWR7-NEXT: vslw v2, v2, v4 -; CHECK-PWR7-NEXT: vslw v0, v0, v4 -; CHECK-PWR7-NEXT: vslw v3, v3, v4 -; CHECK-PWR7-NEXT: vsraw v5, v5, v4 -; CHECK-PWR7-NEXT: vsraw v2, v2, v4 -; CHECK-PWR7-NEXT: vsraw v0, v0, v4 -; CHECK-PWR7-NEXT: vsraw v3, v3, v4 -; CHECK-PWR7-NEXT: xxlxor v4, v4, v4 -; CHECK-PWR7-NEXT: vsubuwm v2, v2, v3 -; CHECK-PWR7-NEXT: vsubuwm v3, v5, v0 -; CHECK-PWR7-NEXT: vsubuwm v5, v4, v3 -; CHECK-PWR7-NEXT: vsubuwm v4, v4, v2 -; CHECK-PWR7-NEXT: vmaxsw v3, 
v3, v5 -; CHECK-PWR7-NEXT: vmaxsw v2, v2, v4 -; CHECK-PWR7-NEXT: vpkuwum v2, v2, v3 -; CHECK-PWR7-NEXT: blr +; CHECK-LABEL: sub_absv_16_ext: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vminsh v4, v2, v3 +; CHECK-NEXT: vmaxsh v2, v2, v3 +; CHECK-NEXT: vsubuhm v2, v2, v4 +; CHECK-NEXT: blr entry: %0 = sext <8 x i16> %a to <8 x i32> %1 = sext <8 x i16> %b to <8 x i32> @@ -1240,18 +1171,16 @@ entry: define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr { ; CHECK-PWR9-LE-LABEL: zext_sub_absd32: ; CHECK-PWR9-LE: # %bb.0: -; CHECK-PWR9-LE-NEXT: xxlxor v4, v4, v4 -; CHECK-PWR9-LE-NEXT: vmrglh v2, v4, v2 -; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3 -; CHECK-PWR9-LE-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-LE-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2 ; CHECK-PWR9-LE-NEXT: blr ; ; CHECK-PWR9-BE-LABEL: zext_sub_absd32: ; CHECK-PWR9-BE: # %bb.0: -; CHECK-PWR9-BE-NEXT: xxlxor v4, v4, v4 -; CHECK-PWR9-BE-NEXT: vmrghh v2, v4, v2 -; CHECK-PWR9-BE-NEXT: vmrghh v3, v4, v3 -; CHECK-PWR9-BE-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-BE-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-BE-NEXT: vmrghh v2, v3, v2 ; CHECK-PWR9-BE-NEXT: blr ; ; CHECK-PWR8-LABEL: zext_sub_absd32: @@ -1287,18 +1216,16 @@ define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr { define <8 x i16> @zext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr { ; CHECK-PWR9-LE-LABEL: zext_sub_absd16: ; CHECK-PWR9-LE: # %bb.0: -; CHECK-PWR9-LE-NEXT: xxlxor v4, v4, v4 -; CHECK-PWR9-LE-NEXT: vmrglb v2, v4, v2 -; CHECK-PWR9-LE-NEXT: vmrglb v3, v4, v3 -; CHECK-PWR9-LE-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-LE-NEXT: vabsdub v2, v2, v3 +; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-LE-NEXT: vmrglb v2, v3, v2 ; CHECK-PWR9-LE-NEXT: blr ; ; CHECK-PWR9-BE-LABEL: zext_sub_absd16: ; CHECK-PWR9-BE: # %bb.0: -; CHECK-PWR9-BE-NEXT: xxlxor v4, v4, v4 -; CHECK-PWR9-BE-NEXT: vmrghb v2, v4, v2 -; CHECK-PWR9-BE-NEXT: vmrghb v3, v4, v3 -; CHECK-PWR9-BE-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-BE-NEXT: vabsdub v2, v2, v3 +; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-BE-NEXT: vmrghb v2, v3, v2 ; CHECK-PWR9-BE-NEXT: blr ; ; CHECK-PWR8-LABEL: zext_sub_absd16: @@ -1335,8 +1262,8 @@ define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr { ; CHECK-PWR9-LABEL: zext_sub_absd8: ; CHECK-PWR9: # %bb.0: ; CHECK-PWR9-NEXT: xxspltib vs0, 15 -; CHECK-PWR9-NEXT: xxland v2, v2, vs0 ; CHECK-PWR9-NEXT: xxland v3, v3, vs0 +; CHECK-PWR9-NEXT: xxland v2, v2, vs0 ; CHECK-PWR9-NEXT: vabsdub v2, v2, v3 ; CHECK-PWR9-NEXT: blr ; @@ -1361,24 +1288,20 @@ define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr { define <4 x i32> @sext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr { ; CHECK-PWR9-LE-LABEL: sext_sub_absd32: ; CHECK-PWR9-LE: # %bb.0: -; CHECK-PWR9-LE-NEXT: vmrglh v2, v2, v2 -; CHECK-PWR9-LE-NEXT: vmrglh v3, v3, v3 -; CHECK-PWR9-LE-NEXT: vextsh2w v2, v2 -; CHECK-PWR9-LE-NEXT: vextsh2w v3, v3 -; CHECK-PWR9-LE-NEXT: xvnegsp v3, v3 -; CHECK-PWR9-LE-NEXT: xvnegsp v2, v2 -; CHECK-PWR9-LE-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-LE-NEXT: vminsh v4, v2, v3 +; CHECK-PWR9-LE-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-LE-NEXT: vsubuhm v2, v2, v4 +; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2 ; CHECK-PWR9-LE-NEXT: blr ; ; CHECK-PWR9-BE-LABEL: sext_sub_absd32: ; CHECK-PWR9-BE: # %bb.0: -; CHECK-PWR9-BE-NEXT: vmrghh v2, v2, v2 -; CHECK-PWR9-BE-NEXT: vmrghh v3, v3, v3 -; CHECK-PWR9-BE-NEXT: 
vextsh2w v2, v2 -; CHECK-PWR9-BE-NEXT: vextsh2w v3, v3 -; CHECK-PWR9-BE-NEXT: xvnegsp v3, v3 -; CHECK-PWR9-BE-NEXT: xvnegsp v2, v2 -; CHECK-PWR9-BE-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-BE-NEXT: vminsh v4, v2, v3 +; CHECK-PWR9-BE-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-BE-NEXT: vsubuhm v2, v2, v4 +; CHECK-PWR9-BE-NEXT: vmrghh v2, v3, v2 ; CHECK-PWR9-BE-NEXT: blr ; ; CHECK-PWR8-LABEL: sext_sub_absd32: @@ -1423,32 +1346,20 @@ define <4 x i32> @sext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr { define <8 x i16> @sext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr { ; CHECK-PWR9-LE-LABEL: sext_sub_absd16: ; CHECK-PWR9-LE: # %bb.0: -; CHECK-PWR9-LE-NEXT: vmrglb v2, v2, v2 -; CHECK-PWR9-LE-NEXT: vspltish v4, 8 -; CHECK-PWR9-LE-NEXT: vmrglb v3, v3, v3 -; CHECK-PWR9-LE-NEXT: vslh v2, v2, v4 -; CHECK-PWR9-LE-NEXT: vslh v3, v3, v4 -; CHECK-PWR9-LE-NEXT: vsrah v2, v2, v4 -; CHECK-PWR9-LE-NEXT: vsrah v3, v3, v4 -; CHECK-PWR9-LE-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR9-LE-NEXT: vminsb v4, v2, v3 +; CHECK-PWR9-LE-NEXT: vmaxsb v2, v2, v3 ; CHECK-PWR9-LE-NEXT: xxlxor v3, v3, v3 -; CHECK-PWR9-LE-NEXT: vsubuhm v3, v3, v2 -; CHECK-PWR9-LE-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR9-LE-NEXT: vsububm v2, v2, v4 +; CHECK-PWR9-LE-NEXT: vmrglb v2, v3, v2 ; CHECK-PWR9-LE-NEXT: blr ; ; CHECK-PWR9-BE-LABEL: sext_sub_absd16: ; CHECK-PWR9-BE: # %bb.0: -; CHECK-PWR9-BE-NEXT: vmrghb v2, v2, v2 -; CHECK-PWR9-BE-NEXT: vspltish v4, 8 -; CHECK-PWR9-BE-NEXT: vmrghb v3, v3, v3 -; CHECK-PWR9-BE-NEXT: vslh v2, v2, v4 -; CHECK-PWR9-BE-NEXT: vslh v3, v3, v4 -; CHECK-PWR9-BE-NEXT: vsrah v2, v2, v4 -; CHECK-PWR9-BE-NEXT: vsrah v3, v3, v4 -; CHECK-PWR9-BE-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR9-BE-NEXT: vminsb v4, v2, v3 +; CHECK-PWR9-BE-NEXT: vmaxsb v2, v2, v3 ; CHECK-PWR9-BE-NEXT: xxlxor v3, v3, v3 -; CHECK-PWR9-BE-NEXT: vsubuhm v3, v3, v2 -; CHECK-PWR9-BE-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR9-BE-NEXT: vsububm v2, v2, v4 +; CHECK-PWR9-BE-NEXT: vmrghb v2, v3, v2 ; CHECK-PWR9-BE-NEXT: blr ; ; CHECK-PWR8-LABEL: sext_sub_absd16: @@ -1492,14 +1403,13 @@ define <16 x i8> @sext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr { ; CHECK-PWR9-LABEL: sext_sub_absd8: ; CHECK-PWR9: # %bb.0: ; CHECK-PWR9-NEXT: xxspltib v4, 4 -; CHECK-PWR9-NEXT: vslb v2, v2, v4 ; CHECK-PWR9-NEXT: vslb v3, v3, v4 -; CHECK-PWR9-NEXT: vsrab v2, v2, v4 +; CHECK-PWR9-NEXT: vslb v2, v2, v4 ; CHECK-PWR9-NEXT: vsrab v3, v3, v4 -; CHECK-PWR9-NEXT: vsububm v2, v2, v3 -; CHECK-PWR9-NEXT: xxlxor v3, v3, v3 -; CHECK-PWR9-NEXT: vsububm v3, v3, v2 +; CHECK-PWR9-NEXT: vsrab v2, v2, v4 +; CHECK-PWR9-NEXT: vminsb v4, v2, v3 ; CHECK-PWR9-NEXT: vmaxsb v2, v2, v3 +; CHECK-PWR9-NEXT: vsububm v2, v2, v4 ; CHECK-PWR9-NEXT: blr ; ; CHECK-PWR78-LABEL: sext_sub_absd8: @@ -1532,10 +1442,9 @@ define <4 x i32> @absd_int32_ugt(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_ugt: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuw v4, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: vminuw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ugt <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1552,11 +1461,9 @@ define <4 x i32> @absd_int32_uge(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_uge: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuw v4, v3, v2 -; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4 -; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3 -; CHECK-PWR78-NEXT: 
vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v2, v4, vs0 +; CHECK-PWR78-NEXT: vminuw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp uge <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1573,10 +1480,9 @@ define <4 x i32> @absd_int32_ult(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_ult: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuw v4, v3, v2 -; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: vminuw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ult <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1593,11 +1499,9 @@ define <4 x i32> @absd_int32_ule(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_ule: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuw v4, v2, v3 -; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4 -; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v4, v2, vs0 +; CHECK-PWR78-NEXT: vminuw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ule <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1614,10 +1518,9 @@ define <8 x i16> @absd_int16_ugt(<8 x i16>, <8 x i16>) { ; ; CHECK-PWR78-LABEL: absd_int16_ugt: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuh v4, v2, v3 -; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: vminuh v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ugt <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1634,11 +1537,9 @@ define <8 x i16> @absd_int16_uge(<8 x i16>, <8 x i16>) { ; ; CHECK-PWR78-LABEL: absd_int16_uge: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuh v4, v3, v2 -; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 -; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 -; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: vminuh v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp uge <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1655,10 +1556,9 @@ define <8 x i16> @absd_int16_ult(<8 x i16>, <8 x i16>) { ; ; CHECK-PWR78-LABEL: absd_int16_ult: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuh v4, v3, v2 -; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: vminuh v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ult <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1675,11 +1575,9 @@ define <8 x i16> @absd_int16_ule(<8 x i16>, <8 x i16>) { ; ; CHECK-PWR78-LABEL: absd_int16_ule: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtuh v4, v2, v3 -; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 -; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 -; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: vminuh v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxuh v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ule <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1696,10 +1594,9 @@ define <16 x i8> @absd_int8_ugt(<16 x i8>, <16 x i8>) { ; ; CHECK-PWR78-LABEL: absd_int8_ugt: ; CHECK-PWR78: # %bb.0: -; 
CHECK-PWR78-NEXT: vcmpgtub v4, v2, v3 -; CHECK-PWR78-NEXT: vsububm v5, v2, v3 -; CHECK-PWR78-NEXT: vsububm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: vminub v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxub v2, v2, v3 +; CHECK-PWR78-NEXT: vsububm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ugt <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -1716,11 +1613,9 @@ define <16 x i8> @absd_int8_uge(<16 x i8>, <16 x i8>) { ; ; CHECK-PWR78-LABEL: absd_int8_uge: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtub v4, v3, v2 -; CHECK-PWR78-NEXT: vsububm v5, v2, v3 -; CHECK-PWR78-NEXT: vsububm v2, v3, v2 -; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 -; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: vminub v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxub v2, v2, v3 +; CHECK-PWR78-NEXT: vsububm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp uge <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -1737,10 +1632,9 @@ define <16 x i8> @absd_int8_ult(<16 x i8>, <16 x i8>) { ; ; CHECK-PWR78-LABEL: absd_int8_ult: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtub v4, v3, v2 -; CHECK-PWR78-NEXT: vsububm v5, v2, v3 -; CHECK-PWR78-NEXT: vsububm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: vminub v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxub v2, v2, v3 +; CHECK-PWR78-NEXT: vsububm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ult <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -1757,11 +1651,9 @@ define <16 x i8> @absd_int8_ule(<16 x i8>, <16 x i8>) { ; ; CHECK-PWR78-LABEL: absd_int8_ule: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtub v4, v2, v3 -; CHECK-PWR78-NEXT: vsububm v5, v2, v3 -; CHECK-PWR78-NEXT: vsububm v2, v3, v2 -; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 -; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: vminub v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxub v2, v2, v3 +; CHECK-PWR78-NEXT: vsububm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp ule <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -1782,10 +1674,9 @@ define <4 x i32> @absd_int32_sgt(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_sgt: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtsw v4, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: vminsw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp sgt <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1804,11 +1695,9 @@ define <4 x i32> @absd_int32_sge(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_sge: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtsw v4, v3, v2 -; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4 -; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v2, v4, vs0 +; CHECK-PWR78-NEXT: vminsw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp sge <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1827,10 +1716,9 @@ define <4 x i32> @absd_int32_slt(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_slt: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtsw v4, v3, v2 -; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: vminsw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp slt <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1849,11 +1737,9 @@ define <4 x i32> 
@absd_int32_sle(<4 x i32>, <4 x i32>) { ; ; CHECK-PWR78-LABEL: absd_int32_sle: ; CHECK-PWR78: # %bb.0: -; CHECK-PWR78-NEXT: vcmpgtsw v4, v2, v3 -; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4 -; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3 -; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 -; CHECK-PWR78-NEXT: xxsel v2, v4, v2, vs0 +; CHECK-PWR78-NEXT: vminsw v4, v2, v3 +; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v4 ; CHECK-PWR78-NEXT: blr %3 = icmp sle <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 @@ -1865,10 +1751,9 @@ define <4 x i32> @absd_int32_sle(<4 x i32>, <4 x i32>) { define <8 x i16> @absd_int16_sgt(<8 x i16>, <8 x i16>) { ; CHECK-LABEL: absd_int16_sgt: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsh v4, v2, v3 -; CHECK-NEXT: vsubuhm v5, v2, v3 -; CHECK-NEXT: vsubuhm v2, v3, v2 -; CHECK-NEXT: xxsel v2, v2, v5, v4 +; CHECK-NEXT: vminsh v4, v2, v3 +; CHECK-NEXT: vmaxsh v2, v2, v3 +; CHECK-NEXT: vsubuhm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp sgt <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1880,11 +1765,9 @@ define <8 x i16> @absd_int16_sgt(<8 x i16>, <8 x i16>) { define <8 x i16> @absd_int16_sge(<8 x i16>, <8 x i16>) { ; CHECK-LABEL: absd_int16_sge: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsh v4, v3, v2 -; CHECK-NEXT: vsubuhm v5, v2, v3 -; CHECK-NEXT: vsubuhm v2, v3, v2 -; CHECK-NEXT: xxlnor v4, v4, v4 -; CHECK-NEXT: xxsel v2, v2, v5, v4 +; CHECK-NEXT: vminsh v4, v2, v3 +; CHECK-NEXT: vmaxsh v2, v2, v3 +; CHECK-NEXT: vsubuhm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp sge <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1896,10 +1779,9 @@ define <8 x i16> @absd_int16_sge(<8 x i16>, <8 x i16>) { define <8 x i16> @absd_int16_slt(<8 x i16>, <8 x i16>) { ; CHECK-LABEL: absd_int16_slt: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsh v4, v3, v2 -; CHECK-NEXT: vsubuhm v5, v2, v3 -; CHECK-NEXT: vsubuhm v2, v3, v2 -; CHECK-NEXT: xxsel v2, v5, v2, v4 +; CHECK-NEXT: vminsh v4, v2, v3 +; CHECK-NEXT: vmaxsh v2, v2, v3 +; CHECK-NEXT: vsubuhm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp slt <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1911,11 +1793,9 @@ define <8 x i16> @absd_int16_slt(<8 x i16>, <8 x i16>) { define <8 x i16> @absd_int16_sle(<8 x i16>, <8 x i16>) { ; CHECK-LABEL: absd_int16_sle: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsh v4, v2, v3 -; CHECK-NEXT: vsubuhm v5, v2, v3 -; CHECK-NEXT: vsubuhm v2, v3, v2 -; CHECK-NEXT: xxlnor v4, v4, v4 -; CHECK-NEXT: xxsel v2, v5, v2, v4 +; CHECK-NEXT: vminsh v4, v2, v3 +; CHECK-NEXT: vmaxsh v2, v2, v3 +; CHECK-NEXT: vsubuhm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp sle <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 @@ -1927,10 +1807,9 @@ define <8 x i16> @absd_int16_sle(<8 x i16>, <8 x i16>) { define <16 x i8> @absd_int8_sgt(<16 x i8>, <16 x i8>) { ; CHECK-LABEL: absd_int8_sgt: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsb v4, v2, v3 -; CHECK-NEXT: vsububm v5, v2, v3 -; CHECK-NEXT: vsububm v2, v3, v2 -; CHECK-NEXT: xxsel v2, v2, v5, v4 +; CHECK-NEXT: vminsb v4, v2, v3 +; CHECK-NEXT: vmaxsb v2, v2, v3 +; CHECK-NEXT: vsububm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp sgt <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -1942,11 +1821,9 @@ define <16 x i8> @absd_int8_sgt(<16 x i8>, <16 x i8>) { define <16 x i8> @absd_int8_sge(<16 x i8>, <16 x i8>) { ; CHECK-LABEL: absd_int8_sge: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsb v4, v3, v2 -; CHECK-NEXT: vsububm v5, v2, v3 -; CHECK-NEXT: vsububm v2, v3, v2 -; CHECK-NEXT: xxlnor v4, v4, v4 -; CHECK-NEXT: xxsel v2, v2, v5, v4 +; CHECK-NEXT: vminsb v4, v2, v3 +; CHECK-NEXT: vmaxsb v2, v2, v3 +; CHECK-NEXT: vsububm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp 
sge <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -1958,10 +1835,9 @@ define <16 x i8> @absd_int8_sge(<16 x i8>, <16 x i8>) { define <16 x i8> @absd_int8_slt(<16 x i8>, <16 x i8>) { ; CHECK-LABEL: absd_int8_slt: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsb v4, v3, v2 -; CHECK-NEXT: vsububm v5, v2, v3 -; CHECK-NEXT: vsububm v2, v3, v2 -; CHECK-NEXT: xxsel v2, v5, v2, v4 +; CHECK-NEXT: vminsb v4, v2, v3 +; CHECK-NEXT: vmaxsb v2, v2, v3 +; CHECK-NEXT: vsububm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp slt <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -1973,11 +1849,9 @@ define <16 x i8> @absd_int8_slt(<16 x i8>, <16 x i8>) { define <16 x i8> @absd_int8_sle(<16 x i8>, <16 x i8>) { ; CHECK-LABEL: absd_int8_sle: ; CHECK: # %bb.0: -; CHECK-NEXT: vcmpgtsb v4, v2, v3 -; CHECK-NEXT: vsububm v5, v2, v3 -; CHECK-NEXT: vsububm v2, v3, v2 -; CHECK-NEXT: xxlnor v4, v4, v4 -; CHECK-NEXT: xxsel v2, v5, v2, v4 +; CHECK-NEXT: vminsb v4, v2, v3 +; CHECK-NEXT: vmaxsb v2, v2, v3 +; CHECK-NEXT: vsububm v2, v2, v4 ; CHECK-NEXT: blr %3 = icmp sle <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 @@ -2006,53 +1880,55 @@ define <4 x i32> @absd_int32_ugt_opp(<4 x i32>, <4 x i32>) { define <2 x i64> @absd_int64_ugt(<2 x i64>, <2 x i64>) { ; CHECK-PWR9-LABEL: absd_int64_ugt: ; CHECK-PWR9: # %bb.0: -; CHECK-PWR9-NEXT: vcmpgtud v4, v2, v3 -; CHECK-PWR9-NEXT: vsubudm v5, v2, v3 -; CHECK-PWR9-NEXT: vsubudm v2, v3, v2 -; CHECK-PWR9-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR9-NEXT: vminud v4, v2, v3 +; CHECK-PWR9-NEXT: vmaxud v2, v2, v3 +; CHECK-PWR9-NEXT: vsubudm v2, v2, v4 ; CHECK-PWR9-NEXT: blr ; ; CHECK-PWR8-LABEL: absd_int64_ugt: ; CHECK-PWR8: # %bb.0: -; CHECK-PWR8-NEXT: vcmpgtud v4, v2, v3 -; CHECK-PWR8-NEXT: vsubudm v5, v2, v3 -; CHECK-PWR8-NEXT: vsubudm v2, v3, v2 -; CHECK-PWR8-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR8-NEXT: vminud v4, v2, v3 +; CHECK-PWR8-NEXT: vmaxud v2, v2, v3 +; CHECK-PWR8-NEXT: vsubudm v2, v2, v4 ; CHECK-PWR8-NEXT: blr ; ; CHECK-PWR7-LABEL: absd_int64_ugt: ; CHECK-PWR7: # %bb.0: -; CHECK-PWR7-NEXT: addi r3, r1, -64 -; CHECK-PWR7-NEXT: addi r4, r1, -80 -; CHECK-PWR7-NEXT: li r5, 0 -; CHECK-PWR7-NEXT: li r6, -1 +; CHECK-PWR7-NEXT: addi r3, r1, -96 +; CHECK-PWR7-NEXT: stxvd2x v2, 0, r3 +; CHECK-PWR7-NEXT: addi r3, r1, -80 ; CHECK-PWR7-NEXT: stxvd2x v3, 0, r3 -; CHECK-PWR7-NEXT: stxvd2x v2, 0, r4 -; CHECK-PWR7-NEXT: addi r9, r1, -16 -; CHECK-PWR7-NEXT: ld r3, -56(r1) +; CHECK-PWR7-NEXT: ld r3, -88(r1) ; CHECK-PWR7-NEXT: ld r4, -72(r1) -; CHECK-PWR7-NEXT: ld r8, -80(r1) -; CHECK-PWR7-NEXT: cmpld r4, r3 -; CHECK-PWR7-NEXT: iselgt r7, r6, r5 -; CHECK-PWR7-NEXT: std r7, -8(r1) -; CHECK-PWR7-NEXT: ld r7, -64(r1) -; CHECK-PWR7-NEXT: cmpld r8, r7 -; CHECK-PWR7-NEXT: iselgt r5, r6, r5 -; CHECK-PWR7-NEXT: std r5, -16(r1) -; CHECK-PWR7-NEXT: sub r5, r4, r3 +; CHECK-PWR7-NEXT: ld r6, -80(r1) +; CHECK-PWR7-NEXT: sub r5, r3, r4 +; CHECK-PWR7-NEXT: cmpld r3, r4 +; CHECK-PWR7-NEXT: li r3, 0 +; CHECK-PWR7-NEXT: li r4, -1 +; CHECK-PWR7-NEXT: std r5, -56(r1) +; CHECK-PWR7-NEXT: ld r5, -96(r1) +; CHECK-PWR7-NEXT: sub r7, r5, r6 +; CHECK-PWR7-NEXT: std r7, -64(r1) +; CHECK-PWR7-NEXT: iselgt r7, r4, r3 +; CHECK-PWR7-NEXT: cmpld r5, r6 +; CHECK-PWR7-NEXT: std r7, -40(r1) +; CHECK-PWR7-NEXT: iselgt r3, r4, r3 +; CHECK-PWR7-NEXT: addi r4, r1, -64 +; CHECK-PWR7-NEXT: std r3, -48(r1) +; CHECK-PWR7-NEXT: lxvw4x vs0, 0, r4 +; CHECK-PWR7-NEXT: addi r4, r1, -48 +; CHECK-PWR7-NEXT: lxvw4x vs1, 0, r4 +; CHECK-PWR7-NEXT: addi r4, r1, -32 +; CHECK-PWR7-NEXT: xxlxor vs0, vs0, vs1 +; CHECK-PWR7-NEXT: stxvw4x vs0, 0, r4 +; CHECK-PWR7-NEXT: ld r4, 
-24(r1) +; CHECK-PWR7-NEXT: sub r4, r7, r4 +; CHECK-PWR7-NEXT: std r4, -8(r1) +; CHECK-PWR7-NEXT: ld r4, -32(r1) ; CHECK-PWR7-NEXT: sub r3, r3, r4 -; CHECK-PWR7-NEXT: lxvd2x v2, 0, r9 -; CHECK-PWR7-NEXT: std r5, -40(r1) -; CHECK-PWR7-NEXT: sub r5, r8, r7 -; CHECK-PWR7-NEXT: std r5, -48(r1) -; CHECK-PWR7-NEXT: addi r5, r1, -48 -; CHECK-PWR7-NEXT: lxvd2x v3, 0, r5 -; CHECK-PWR7-NEXT: std r3, -24(r1) -; CHECK-PWR7-NEXT: sub r3, r7, r8 -; CHECK-PWR7-NEXT: std r3, -32(r1) -; CHECK-PWR7-NEXT: addi r3, r1, -32 -; CHECK-PWR7-NEXT: lxvd2x v4, 0, r3 -; CHECK-PWR7-NEXT: xxsel v2, v4, v3, v2 +; CHECK-PWR7-NEXT: std r3, -16(r1) +; CHECK-PWR7-NEXT: addi r3, r1, -16 +; CHECK-PWR7-NEXT: lxvd2x v2, 0, r3 ; CHECK-PWR7-NEXT: blr %3 = icmp ugt <2 x i64> %0, %1 %4 = sub <2 x i64> %0, %1 diff --git a/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll b/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll index 787b81f7f20980..32c28148df32e3 100644 --- a/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll +++ b/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll @@ -1,31 +1,11 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64le -mcpu=pwr9 < %s | FileCheck %s +; Widen to <16 x i8> define <12 x i8> @zext_abdu(<12 x i8> %a, <12 x i8> %b) { ; CHECK-LABEL: zext_abdu: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha -; CHECK-NEXT: xxlxor 36, 36, 36 -; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l -; CHECK-NEXT: lxv 37, 0(3) -; CHECK-NEXT: addis 3, 2, .LCPI0_1@toc@ha -; CHECK-NEXT: addi 3, 3, .LCPI0_1@toc@l -; CHECK-NEXT: lxv 33, 0(3) -; CHECK-NEXT: addis 3, 2, .LCPI0_2@toc@ha -; CHECK-NEXT: vperm 0, 4, 2, 5 -; CHECK-NEXT: vperm 5, 4, 3, 5 -; CHECK-NEXT: addi 3, 3, .LCPI0_2@toc@l -; CHECK-NEXT: lxv 39, 0(3) -; CHECK-NEXT: vperm 6, 4, 2, 1 -; CHECK-NEXT: vperm 1, 4, 3, 1 -; CHECK-NEXT: vperm 2, 4, 2, 7 -; CHECK-NEXT: vperm 3, 4, 3, 7 -; CHECK-NEXT: vabsduw 4, 5, 0 -; CHECK-NEXT: vabsduw 2, 3, 2 -; CHECK-NEXT: vabsduw 3, 1, 6 -; CHECK-NEXT: vpkuwum 3, 4, 3 -; CHECK-NEXT: vpkuwum 2, 2, 2 -; CHECK-NEXT: vpkuhum 2, 2, 3 +; CHECK-NEXT: vabsdub 2, 2, 3 ; CHECK-NEXT: blr entry: %aa = zext <12 x i8> %a to <12 x i32> diff --git a/llvm/test/CodeGen/RISCV/abds-neg.ll b/llvm/test/CodeGen/RISCV/abds-neg.ll index 4ebb92991cfc29..9bd28b91dd4c95 100644 --- a/llvm/test/CodeGen/RISCV/abds-neg.ll +++ b/llvm/test/CodeGen/RISCV/abds-neg.ll @@ -10,10 +10,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: slli a1, a1, 24 ; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -32,14 +32,23 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i8: -; ZBB: # %bb.0: -; ZBB-NEXT: sext.b a0, a0 -; ZBB-NEXT: sext.b a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i8: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: sext.b a1, a1 +; RV32ZBB-NEXT: sext.b a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i8: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: sext.b a0, a0 +; RV64ZBB-NEXT: sext.b a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; 
RV64ZBB-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 %sub = sub i64 %aext, %bext @@ -52,10 +61,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -74,14 +83,23 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i8_i16: -; ZBB: # %bb.0: -; ZBB-NEXT: sext.b a0, a0 -; ZBB-NEXT: sext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i8_i16: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: sext.h a1, a1 +; RV32ZBB-NEXT: sext.b a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i8_i16: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: sext.b a0, a0 +; RV64ZBB-NEXT: sext.h a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i16 %b to i64 %sub = sub i64 %aext, %bext @@ -94,10 +112,10 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: slli a1, a1, 24 ; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -116,14 +134,23 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i8_undef: -; ZBB: # %bb.0: -; ZBB-NEXT: sext.b a0, a0 -; ZBB-NEXT: sext.b a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i8_undef: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: sext.b a1, a1 +; RV32ZBB-NEXT: sext.b a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i8_undef: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: sext.b a0, a0 +; RV64ZBB-NEXT: sext.b a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 %sub = sub i64 %aext, %bext @@ -136,10 +163,10 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -158,14 +185,23 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i16: -; ZBB: # %bb.0: -; ZBB-NEXT: sext.h a0, a0 -; ZBB-NEXT: sext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i16: +; RV32ZBB: # %bb.0: +; 
RV32ZBB-NEXT: sext.h a1, a1 +; RV32ZBB-NEXT: sext.h a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i16: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: sext.h a0, a0 +; RV64ZBB-NEXT: sext.h a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 %sub = sub i64 %aext, %bext @@ -179,17 +215,14 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i16_i32: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srai a2, a0, 31 ; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: srai a3, a1, 31 -; RV32I-NEXT: sltu a4, a0, a1 -; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB4_2 +; RV32I-NEXT: blt a1, a0, .LBB4_2 ; RV32I-NEXT: # %bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -207,17 +240,9 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32ZBB-LABEL: abd_ext_i16_i32: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: sext.h a0, a0 -; RV32ZBB-NEXT: srai a2, a0, 31 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB4_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB4_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i16_i32: @@ -240,10 +265,10 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i16_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -262,14 +287,23 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i16_undef: -; ZBB: # %bb.0: -; ZBB-NEXT: sext.h a0, a0 -; ZBB-NEXT: sext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i16_undef: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: sext.h a1, a1 +; RV32ZBB-NEXT: sext.h a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i16_undef: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: sext.h a0, a0 +; RV64ZBB-NEXT: sext.h a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 %sub = sub i64 %aext, %bext @@ -282,16 +316,13 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a2, a0, 31 -; RV32I-NEXT: srai a3, a1, 31 -; RV32I-NEXT: sltu a4, a0, a1 -; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB6_2 +; RV32I-NEXT: blt a1, a0, .LBB6_2 ; RV32I-NEXT: # 
%bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB6_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -307,17 +338,9 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a2, a0, 31 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB6_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB6_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32: @@ -340,18 +363,15 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i32_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a2, a0, 31 ; RV32I-NEXT: slli a1, a1, 16 -; RV32I-NEXT: srai a3, a1, 31 ; RV32I-NEXT: srai a1, a1, 16 -; RV32I-NEXT: sltu a4, a0, a1 -; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB7_2 +; RV32I-NEXT: blt a1, a0, .LBB7_2 ; RV32I-NEXT: # %bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB7_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -368,18 +388,10 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32_i16: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a2, a0, 31 ; RV32ZBB-NEXT: sext.h a1, a1 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB7_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB7_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_i16: @@ -402,16 +414,13 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a2, a0, 31 -; RV32I-NEXT: srai a3, a1, 31 -; RV32I-NEXT: sltu a4, a0, a1 -; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB8_2 +; RV32I-NEXT: blt a1, a0, .LBB8_2 ; RV32I-NEXT: # %bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB8_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -427,17 +436,9 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a2, a0, 31 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB8_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB8_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: max a2, a0, a1 +; RV32ZBB-NEXT: min a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_undef: @@ -460,29 +461,24 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64: ; RV32I: # 
%bb.0: -; RV32I-NEXT: srai a5, a1, 31 -; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: srai a6, a3, 31 -; RV32I-NEXT: mv a7, a4 +; RV32I-NEXT: sltu a4, a2, a0 +; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: beq a1, a3, .LBB9_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu a7, a1, a3 +; RV32I-NEXT: slt a5, a3, a1 ; RV32I-NEXT: .LBB9_2: -; RV32I-NEXT: sub t0, a5, a6 -; RV32I-NEXT: sltu a7, t0, a7 -; RV32I-NEXT: sltu a5, a5, a6 -; RV32I-NEXT: sub a5, t0, a5 -; RV32I-NEXT: sub a5, a5, a7 +; RV32I-NEXT: bnez a5, .LBB9_4 +; RV32I-NEXT: # %bb.3: +; RV32I-NEXT: sub a1, a3, a1 +; RV32I-NEXT: sub a1, a1, a4 +; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: j .LBB9_5 +; RV32I-NEXT: .LBB9_4: +; RV32I-NEXT: sltu a4, a0, a2 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a1, a1, a4 ; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB9_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: .LBB9_4: +; RV32I-NEXT: .LBB9_5: ; RV32I-NEXT: snez a2, a0 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 @@ -491,44 +487,36 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a2, a0, 63 -; RV64I-NEXT: srai a3, a1, 63 -; RV64I-NEXT: sltu a4, a0, a1 -; RV64I-NEXT: sub a2, a2, a3 -; RV64I-NEXT: sub a2, a2, a4 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB9_2 +; RV64I-NEXT: blt a1, a0, .LBB9_2 ; RV64I-NEXT: # %bb.1: +; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB9_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a5, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: srai a6, a3, 31 -; RV32ZBB-NEXT: mv a7, a4 +; RV32ZBB-NEXT: sltu a4, a2, a0 +; RV32ZBB-NEXT: mv a5, a4 ; RV32ZBB-NEXT: beq a1, a3, .LBB9_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a7, a1, a3 +; RV32ZBB-NEXT: slt a5, a3, a1 ; RV32ZBB-NEXT: .LBB9_2: -; RV32ZBB-NEXT: sub t0, a5, a6 -; RV32ZBB-NEXT: sltu a7, t0, a7 -; RV32ZBB-NEXT: sltu a5, a5, a6 -; RV32ZBB-NEXT: sub a5, t0, a5 -; RV32ZBB-NEXT: sub a5, a5, a7 +; RV32ZBB-NEXT: bnez a5, .LBB9_4 +; RV32ZBB-NEXT: # %bb.3: +; RV32ZBB-NEXT: sub a1, a3, a1 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sub a0, a2, a0 +; RV32ZBB-NEXT: j .LBB9_5 +; RV32ZBB-NEXT: .LBB9_4: +; RV32ZBB-NEXT: sltu a4, a0, a2 ; RV32ZBB-NEXT: sub a1, a1, a3 ; RV32ZBB-NEXT: sub a1, a1, a4 ; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB9_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB9_4: +; RV32ZBB-NEXT: .LBB9_5: ; RV32ZBB-NEXT: snez a2, a0 ; RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 @@ -537,17 +525,9 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; ; RV64ZBB-LABEL: abd_ext_i64: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a2, a0, 63 -; RV64ZBB-NEXT: srai a3, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a1 -; RV64ZBB-NEXT: sub a2, a2, a3 -; RV64ZBB-NEXT: sub a2, a2, a4 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB9_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB9_2: -; RV64ZBB-NEXT: neg a0, a0 +; RV64ZBB-NEXT: max a2, a0, a1 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -561,29 +541,24 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { 
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a5, a1, 31 -; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: srai a6, a3, 31 -; RV32I-NEXT: mv a7, a4 +; RV32I-NEXT: sltu a4, a2, a0 +; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: beq a1, a3, .LBB10_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu a7, a1, a3 +; RV32I-NEXT: slt a5, a3, a1 ; RV32I-NEXT: .LBB10_2: -; RV32I-NEXT: sub t0, a5, a6 -; RV32I-NEXT: sltu a7, t0, a7 -; RV32I-NEXT: sltu a5, a5, a6 -; RV32I-NEXT: sub a5, t0, a5 -; RV32I-NEXT: sub a5, a5, a7 +; RV32I-NEXT: bnez a5, .LBB10_4 +; RV32I-NEXT: # %bb.3: +; RV32I-NEXT: sub a1, a3, a1 +; RV32I-NEXT: sub a1, a1, a4 +; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: j .LBB10_5 +; RV32I-NEXT: .LBB10_4: +; RV32I-NEXT: sltu a4, a0, a2 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a1, a1, a4 ; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB10_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: .LBB10_4: +; RV32I-NEXT: .LBB10_5: ; RV32I-NEXT: snez a2, a0 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 @@ -592,44 +567,36 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i64_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a2, a0, 63 -; RV64I-NEXT: srai a3, a1, 63 -; RV64I-NEXT: sltu a4, a0, a1 -; RV64I-NEXT: sub a2, a2, a3 -; RV64I-NEXT: sub a2, a2, a4 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB10_2 +; RV64I-NEXT: blt a1, a0, .LBB10_2 ; RV64I-NEXT: # %bb.1: +; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB10_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a5, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: srai a6, a3, 31 -; RV32ZBB-NEXT: mv a7, a4 +; RV32ZBB-NEXT: sltu a4, a2, a0 +; RV32ZBB-NEXT: mv a5, a4 ; RV32ZBB-NEXT: beq a1, a3, .LBB10_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a7, a1, a3 +; RV32ZBB-NEXT: slt a5, a3, a1 ; RV32ZBB-NEXT: .LBB10_2: -; RV32ZBB-NEXT: sub t0, a5, a6 -; RV32ZBB-NEXT: sltu a7, t0, a7 -; RV32ZBB-NEXT: sltu a5, a5, a6 -; RV32ZBB-NEXT: sub a5, t0, a5 -; RV32ZBB-NEXT: sub a5, a5, a7 +; RV32ZBB-NEXT: bnez a5, .LBB10_4 +; RV32ZBB-NEXT: # %bb.3: +; RV32ZBB-NEXT: sub a1, a3, a1 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sub a0, a2, a0 +; RV32ZBB-NEXT: j .LBB10_5 +; RV32ZBB-NEXT: .LBB10_4: +; RV32ZBB-NEXT: sltu a4, a0, a2 ; RV32ZBB-NEXT: sub a1, a1, a3 ; RV32ZBB-NEXT: sub a1, a1, a4 ; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB10_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB10_4: +; RV32ZBB-NEXT: .LBB10_5: ; RV32ZBB-NEXT: snez a2, a0 ; RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 @@ -638,17 +605,9 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; ; RV64ZBB-LABEL: abd_ext_i64_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a2, a0, 63 -; RV64ZBB-NEXT: srai a3, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a1 -; RV64ZBB-NEXT: sub a2, a2, a3 -; RV64ZBB-NEXT: sub a2, a2, a4 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB10_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB10_2: -; RV64ZBB-NEXT: neg a0, a0 +; RV64ZBB-NEXT: max a2, a0, a1 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 
; RV64ZBB-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -662,121 +621,117 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) -; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB11_2 +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a4, 0(a2) +; RV32I-NEXT: lw a5, 4(a1) +; RV32I-NEXT: lw a6, 8(a1) +; RV32I-NEXT: lw a7, 8(a2) +; RV32I-NEXT: lw t1, 12(a1) +; RV32I-NEXT: lw t2, 12(a2) +; RV32I-NEXT: lw a1, 4(a2) +; RV32I-NEXT: sltu t3, a7, a6 +; RV32I-NEXT: mv t4, t3 +; RV32I-NEXT: beq t1, t2, .LBB11_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: slt t4, t2, t1 ; RV32I-NEXT: .LBB11_2: -; RV32I-NEXT: sltu t2, a4, a3 -; RV32I-NEXT: mv t3, t2 -; RV32I-NEXT: beq a1, a5, .LBB11_4 +; RV32I-NEXT: sltu a2, a4, a3 +; RV32I-NEXT: sltu t6, a1, a5 +; RV32I-NEXT: mv t0, a2 +; RV32I-NEXT: beq a5, a1, .LBB11_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: mv t0, t6 ; RV32I-NEXT: .LBB11_4: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: srai t5, t0, 31 -; RV32I-NEXT: xor t6, t0, a2 -; RV32I-NEXT: xor s0, a7, a6 -; RV32I-NEXT: or s1, s0, t6 -; RV32I-NEXT: srai t6, a2, 31 -; RV32I-NEXT: mv s0, t3 -; RV32I-NEXT: beqz s1, .LBB11_6 +; RV32I-NEXT: xor t5, t1, t2 +; RV32I-NEXT: xor s0, a6, a7 +; RV32I-NEXT: or t5, s0, t5 +; RV32I-NEXT: beqz t5, .LBB11_6 ; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv s0, t4 +; RV32I-NEXT: mv t0, t4 ; RV32I-NEXT: .LBB11_6: -; RV32I-NEXT: sub t4, t5, t6 -; RV32I-NEXT: sltu s0, t4, s0 -; RV32I-NEXT: sltu t5, t5, t6 -; RV32I-NEXT: sub t6, t4, t5 -; RV32I-NEXT: seqz s1, t6 -; RV32I-NEXT: and s0, s1, s0 -; RV32I-NEXT: sltu s0, t6, s0 -; RV32I-NEXT: sltu t4, t4, t5 -; RV32I-NEXT: sub t4, t6, t4 -; RV32I-NEXT: sub t4, t4, s0 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 -; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB11_8 +; RV32I-NEXT: mv t5, a2 +; RV32I-NEXT: beq a1, a5, .LBB11_8 ; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: sub a6, a7, a3 +; RV32I-NEXT: mv t5, t6 ; RV32I-NEXT: .LBB11_8: -; RV32I-NEXT: snez a3, a6 -; RV32I-NEXT: add a2, a2, a3 -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a6, a6 -; RV32I-NEXT: sltu a7, a6, a3 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a3, a6, a3 -; RV32I-NEXT: add a1, a1, a5 +; RV32I-NEXT: sltu t4, a3, a4 +; RV32I-NEXT: mv t6, t4 +; RV32I-NEXT: beq a5, a1, .LBB11_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: sltu t6, a5, a1 +; RV32I-NEXT: .LBB11_10: +; RV32I-NEXT: bnez t0, .LBB11_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sub t1, 
t2, t1 +; RV32I-NEXT: sub a6, a7, a6 +; RV32I-NEXT: sub a7, t1, t3 +; RV32I-NEXT: sltu t1, a6, t5 +; RV32I-NEXT: sub a7, a7, t1 +; RV32I-NEXT: sub a6, a6, t5 +; RV32I-NEXT: j .LBB11_13 +; RV32I-NEXT: .LBB11_12: +; RV32I-NEXT: sltu t3, a6, a7 +; RV32I-NEXT: sub t1, t1, t2 +; RV32I-NEXT: sub t1, t1, t3 +; RV32I-NEXT: sub a6, a6, a7 +; RV32I-NEXT: sltu a7, a6, t6 +; RV32I-NEXT: sub a7, t1, a7 +; RV32I-NEXT: sub a6, a6, t6 +; RV32I-NEXT: .LBB11_13: +; RV32I-NEXT: snez t1, a6 +; RV32I-NEXT: add a7, a7, t1 +; RV32I-NEXT: bnez t0, .LBB11_15 +; RV32I-NEXT: # %bb.14: +; RV32I-NEXT: sub a1, a1, a5 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: sub a3, a4, a3 +; RV32I-NEXT: j .LBB11_16 +; RV32I-NEXT: .LBB11_15: +; RV32I-NEXT: sub a5, a5, a1 +; RV32I-NEXT: sub a1, a5, t4 +; RV32I-NEXT: sub a3, a3, a4 +; RV32I-NEXT: .LBB11_16: +; RV32I-NEXT: or a2, a3, a1 +; RV32I-NEXT: snez a2, a2 +; RV32I-NEXT: neg a4, a6 +; RV32I-NEXT: sltu a5, a4, a2 +; RV32I-NEXT: neg a6, a7 +; RV32I-NEXT: sub a5, a6, a5 +; RV32I-NEXT: snez a6, a3 +; RV32I-NEXT: add a1, a1, a6 ; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: sw a4, 0(a0) +; RV32I-NEXT: sub a4, a4, a2 +; RV32I-NEXT: neg a2, a3 +; RV32I-NEXT: sw a2, 0(a0) +; RV32I-NEXT: sw a4, 8(a0) ; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw a3, 8(a0) -; RV32I-NEXT: sw a2, 12(a0) +; RV32I-NEXT: sw a5, 12(a0) ; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a5, a1, 63 -; RV64I-NEXT: sltu a4, a0, a2 -; RV64I-NEXT: srai a6, a3, 63 -; RV64I-NEXT: mv a7, a4 +; RV64I-NEXT: sltu a4, a2, a0 +; RV64I-NEXT: mv a5, a4 ; RV64I-NEXT: beq a1, a3, .LBB11_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a7, a1, a3 +; RV64I-NEXT: slt a5, a3, a1 ; RV64I-NEXT: .LBB11_2: -; RV64I-NEXT: sub t0, a5, a6 -; RV64I-NEXT: sltu a7, t0, a7 -; RV64I-NEXT: sltu a5, a5, a6 -; RV64I-NEXT: sub a5, t0, a5 -; RV64I-NEXT: sub a5, a5, a7 +; RV64I-NEXT: bnez a5, .LBB11_4 +; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: sub a1, a3, a1 +; RV64I-NEXT: sub a1, a1, a4 +; RV64I-NEXT: sub a0, a2, a0 +; RV64I-NEXT: j .LBB11_5 +; RV64I-NEXT: .LBB11_4: +; RV64I-NEXT: sltu a4, a0, a2 ; RV64I-NEXT: sub a1, a1, a3 ; RV64I-NEXT: sub a1, a1, a4 ; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB11_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: .LBB11_4: +; RV64I-NEXT: .LBB11_5: ; RV64I-NEXT: snez a2, a0 ; RV64I-NEXT: add a1, a1, a2 ; RV64I-NEXT: neg a1, a1 @@ -785,121 +740,117 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) -; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB11_2 +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw a4, 0(a2) +; RV32ZBB-NEXT: lw a5, 4(a1) +; RV32ZBB-NEXT: lw a6, 8(a1) +; RV32ZBB-NEXT: lw a7, 8(a2) +; RV32ZBB-NEXT: lw t1, 12(a1) +; RV32ZBB-NEXT: lw t2, 12(a2) +; RV32ZBB-NEXT: lw a1, 4(a2) +; RV32ZBB-NEXT: sltu t3, a7, a6 +; RV32ZBB-NEXT: mv t4, t3 +; RV32ZBB-NEXT: beq t1, t2, .LBB11_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: slt t4, t2, t1 ; RV32ZBB-NEXT: 
.LBB11_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 -; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB11_4 +; RV32ZBB-NEXT: sltu a2, a4, a3 +; RV32ZBB-NEXT: sltu t6, a1, a5 +; RV32ZBB-NEXT: mv t0, a2 +; RV32ZBB-NEXT: beq a5, a1, .LBB11_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: mv t0, t6 ; RV32ZBB-NEXT: .LBB11_4: ; RV32ZBB-NEXT: addi sp, sp, -16 ; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: srai t5, t0, 31 -; RV32ZBB-NEXT: xor t6, t0, a2 -; RV32ZBB-NEXT: xor s0, a7, a6 -; RV32ZBB-NEXT: or s1, s0, t6 -; RV32ZBB-NEXT: srai t6, a2, 31 -; RV32ZBB-NEXT: mv s0, t3 -; RV32ZBB-NEXT: beqz s1, .LBB11_6 +; RV32ZBB-NEXT: xor t5, t1, t2 +; RV32ZBB-NEXT: xor s0, a6, a7 +; RV32ZBB-NEXT: or t5, s0, t5 +; RV32ZBB-NEXT: beqz t5, .LBB11_6 ; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv s0, t4 +; RV32ZBB-NEXT: mv t0, t4 ; RV32ZBB-NEXT: .LBB11_6: -; RV32ZBB-NEXT: sub t4, t5, t6 -; RV32ZBB-NEXT: sltu s0, t4, s0 -; RV32ZBB-NEXT: sltu t5, t5, t6 -; RV32ZBB-NEXT: sub t6, t4, t5 -; RV32ZBB-NEXT: seqz s1, t6 -; RV32ZBB-NEXT: and s0, s1, s0 -; RV32ZBB-NEXT: sltu s0, t6, s0 -; RV32ZBB-NEXT: sltu t4, t4, t5 -; RV32ZBB-NEXT: sub t4, t6, t4 -; RV32ZBB-NEXT: sub t4, t4, s0 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 -; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 -; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 -; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB11_8 +; RV32ZBB-NEXT: mv t5, a2 +; RV32ZBB-NEXT: beq a1, a5, .LBB11_8 ; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: sub a6, a7, a3 +; RV32ZBB-NEXT: mv t5, t6 ; RV32ZBB-NEXT: .LBB11_8: -; RV32ZBB-NEXT: snez a3, a6 -; RV32ZBB-NEXT: add a2, a2, a3 -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a6, a6 -; RV32ZBB-NEXT: sltu a7, a6, a3 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a3, a6, a3 -; RV32ZBB-NEXT: add a1, a1, a5 +; RV32ZBB-NEXT: sltu t4, a3, a4 +; RV32ZBB-NEXT: mv t6, t4 +; RV32ZBB-NEXT: beq a5, a1, .LBB11_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: sltu t6, a5, a1 +; RV32ZBB-NEXT: .LBB11_10: +; RV32ZBB-NEXT: bnez t0, .LBB11_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sub t1, t2, t1 +; RV32ZBB-NEXT: sub a6, a7, a6 +; RV32ZBB-NEXT: sub a7, t1, t3 +; RV32ZBB-NEXT: sltu t1, a6, t5 +; RV32ZBB-NEXT: sub a7, a7, t1 +; RV32ZBB-NEXT: sub a6, a6, t5 +; RV32ZBB-NEXT: j .LBB11_13 +; RV32ZBB-NEXT: .LBB11_12: +; RV32ZBB-NEXT: sltu t3, a6, a7 +; RV32ZBB-NEXT: sub t1, t1, t2 +; RV32ZBB-NEXT: sub t1, t1, t3 +; RV32ZBB-NEXT: sub a6, a6, a7 +; RV32ZBB-NEXT: sltu a7, a6, t6 +; RV32ZBB-NEXT: sub a7, t1, a7 +; RV32ZBB-NEXT: sub a6, a6, t6 +; RV32ZBB-NEXT: .LBB11_13: +; RV32ZBB-NEXT: snez t1, a6 +; RV32ZBB-NEXT: add a7, a7, t1 +; RV32ZBB-NEXT: bnez t0, .LBB11_15 +; RV32ZBB-NEXT: # %bb.14: +; RV32ZBB-NEXT: sub a1, a1, a5 +; RV32ZBB-NEXT: sub a1, a1, a2 +; RV32ZBB-NEXT: sub a3, a4, a3 +; RV32ZBB-NEXT: j .LBB11_16 +; RV32ZBB-NEXT: .LBB11_15: +; RV32ZBB-NEXT: sub a5, a5, a1 +; RV32ZBB-NEXT: sub a1, a5, t4 
+; RV32ZBB-NEXT: sub a3, a3, a4 +; RV32ZBB-NEXT: .LBB11_16: +; RV32ZBB-NEXT: or a2, a3, a1 +; RV32ZBB-NEXT: snez a2, a2 +; RV32ZBB-NEXT: neg a4, a6 +; RV32ZBB-NEXT: sltu a5, a4, a2 +; RV32ZBB-NEXT: neg a6, a7 +; RV32ZBB-NEXT: sub a5, a6, a5 +; RV32ZBB-NEXT: snez a6, a3 +; RV32ZBB-NEXT: add a1, a1, a6 ; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: sw a4, 0(a0) +; RV32ZBB-NEXT: sub a4, a4, a2 +; RV32ZBB-NEXT: neg a2, a3 +; RV32ZBB-NEXT: sw a2, 0(a0) +; RV32ZBB-NEXT: sw a4, 8(a0) ; RV32ZBB-NEXT: sw a1, 4(a0) -; RV32ZBB-NEXT: sw a3, 8(a0) -; RV32ZBB-NEXT: sw a2, 12(a0) +; RV32ZBB-NEXT: sw a5, 12(a0) ; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32ZBB-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBB-NEXT: addi sp, sp, 16 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a5, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: srai a6, a3, 63 -; RV64ZBB-NEXT: mv a7, a4 +; RV64ZBB-NEXT: sltu a4, a2, a0 +; RV64ZBB-NEXT: mv a5, a4 ; RV64ZBB-NEXT: beq a1, a3, .LBB11_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a7, a1, a3 +; RV64ZBB-NEXT: slt a5, a3, a1 ; RV64ZBB-NEXT: .LBB11_2: -; RV64ZBB-NEXT: sub t0, a5, a6 -; RV64ZBB-NEXT: sltu a7, t0, a7 -; RV64ZBB-NEXT: sltu a5, a5, a6 -; RV64ZBB-NEXT: sub a5, t0, a5 -; RV64ZBB-NEXT: sub a5, a5, a7 +; RV64ZBB-NEXT: bnez a5, .LBB11_4 +; RV64ZBB-NEXT: # %bb.3: +; RV64ZBB-NEXT: sub a1, a3, a1 +; RV64ZBB-NEXT: sub a1, a1, a4 +; RV64ZBB-NEXT: sub a0, a2, a0 +; RV64ZBB-NEXT: j .LBB11_5 +; RV64ZBB-NEXT: .LBB11_4: +; RV64ZBB-NEXT: sltu a4, a0, a2 ; RV64ZBB-NEXT: sub a1, a1, a3 ; RV64ZBB-NEXT: sub a1, a1, a4 ; RV64ZBB-NEXT: sub a0, a0, a2 -; RV64ZBB-NEXT: bgez a5, .LBB11_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB11_4: +; RV64ZBB-NEXT: .LBB11_5: ; RV64ZBB-NEXT: snez a2, a0 ; RV64ZBB-NEXT: add a1, a1, a2 ; RV64ZBB-NEXT: neg a1, a1 @@ -917,121 +868,117 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) -; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB12_2 +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a4, 0(a2) +; RV32I-NEXT: lw a5, 4(a1) +; RV32I-NEXT: lw a6, 8(a1) +; RV32I-NEXT: lw a7, 8(a2) +; RV32I-NEXT: lw t1, 12(a1) +; RV32I-NEXT: lw t2, 12(a2) +; RV32I-NEXT: lw a1, 4(a2) +; RV32I-NEXT: sltu t3, a7, a6 +; RV32I-NEXT: mv t4, t3 +; RV32I-NEXT: beq t1, t2, .LBB12_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: slt t4, t2, t1 ; RV32I-NEXT: .LBB12_2: -; RV32I-NEXT: sltu t2, a4, a3 -; RV32I-NEXT: mv t3, t2 -; RV32I-NEXT: beq a1, a5, .LBB12_4 +; RV32I-NEXT: sltu a2, a4, a3 +; RV32I-NEXT: sltu t6, a1, a5 +; RV32I-NEXT: mv t0, a2 +; RV32I-NEXT: beq a5, a1, .LBB12_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: mv t0, t6 ; RV32I-NEXT: .LBB12_4: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: srai t5, t0, 31 -; RV32I-NEXT: xor t6, t0, a2 -; RV32I-NEXT: xor s0, a7, a6 -; RV32I-NEXT: or s1, s0, t6 -; RV32I-NEXT: srai t6, a2, 31 -; RV32I-NEXT: mv s0, 
t3 -; RV32I-NEXT: beqz s1, .LBB12_6 +; RV32I-NEXT: xor t5, t1, t2 +; RV32I-NEXT: xor s0, a6, a7 +; RV32I-NEXT: or t5, s0, t5 +; RV32I-NEXT: beqz t5, .LBB12_6 ; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv s0, t4 +; RV32I-NEXT: mv t0, t4 ; RV32I-NEXT: .LBB12_6: -; RV32I-NEXT: sub t4, t5, t6 -; RV32I-NEXT: sltu s0, t4, s0 -; RV32I-NEXT: sltu t5, t5, t6 -; RV32I-NEXT: sub t6, t4, t5 -; RV32I-NEXT: seqz s1, t6 -; RV32I-NEXT: and s0, s1, s0 -; RV32I-NEXT: sltu s0, t6, s0 -; RV32I-NEXT: sltu t4, t4, t5 -; RV32I-NEXT: sub t4, t6, t4 -; RV32I-NEXT: sub t4, t4, s0 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 -; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB12_8 +; RV32I-NEXT: mv t5, a2 +; RV32I-NEXT: beq a1, a5, .LBB12_8 ; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: sub a6, a7, a3 +; RV32I-NEXT: mv t5, t6 ; RV32I-NEXT: .LBB12_8: -; RV32I-NEXT: snez a3, a6 -; RV32I-NEXT: add a2, a2, a3 -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a6, a6 -; RV32I-NEXT: sltu a7, a6, a3 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a3, a6, a3 -; RV32I-NEXT: add a1, a1, a5 +; RV32I-NEXT: sltu t4, a3, a4 +; RV32I-NEXT: mv t6, t4 +; RV32I-NEXT: beq a5, a1, .LBB12_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: sltu t6, a5, a1 +; RV32I-NEXT: .LBB12_10: +; RV32I-NEXT: bnez t0, .LBB12_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sub t1, t2, t1 +; RV32I-NEXT: sub a6, a7, a6 +; RV32I-NEXT: sub a7, t1, t3 +; RV32I-NEXT: sltu t1, a6, t5 +; RV32I-NEXT: sub a7, a7, t1 +; RV32I-NEXT: sub a6, a6, t5 +; RV32I-NEXT: j .LBB12_13 +; RV32I-NEXT: .LBB12_12: +; RV32I-NEXT: sltu t3, a6, a7 +; RV32I-NEXT: sub t1, t1, t2 +; RV32I-NEXT: sub t1, t1, t3 +; RV32I-NEXT: sub a6, a6, a7 +; RV32I-NEXT: sltu a7, a6, t6 +; RV32I-NEXT: sub a7, t1, a7 +; RV32I-NEXT: sub a6, a6, t6 +; RV32I-NEXT: .LBB12_13: +; RV32I-NEXT: snez t1, a6 +; RV32I-NEXT: add a7, a7, t1 +; RV32I-NEXT: bnez t0, .LBB12_15 +; RV32I-NEXT: # %bb.14: +; RV32I-NEXT: sub a1, a1, a5 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: sub a3, a4, a3 +; RV32I-NEXT: j .LBB12_16 +; RV32I-NEXT: .LBB12_15: +; RV32I-NEXT: sub a5, a5, a1 +; RV32I-NEXT: sub a1, a5, t4 +; RV32I-NEXT: sub a3, a3, a4 +; RV32I-NEXT: .LBB12_16: +; RV32I-NEXT: or a2, a3, a1 +; RV32I-NEXT: snez a2, a2 +; RV32I-NEXT: neg a4, a6 +; RV32I-NEXT: sltu a5, a4, a2 +; RV32I-NEXT: neg a6, a7 +; RV32I-NEXT: sub a5, a6, a5 +; RV32I-NEXT: snez a6, a3 +; RV32I-NEXT: add a1, a1, a6 ; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: sw a4, 0(a0) +; RV32I-NEXT: sub a4, a4, a2 +; RV32I-NEXT: neg a2, a3 +; RV32I-NEXT: sw a2, 0(a0) +; RV32I-NEXT: sw a4, 8(a0) ; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw a3, 8(a0) -; RV32I-NEXT: sw a2, 12(a0) +; RV32I-NEXT: sw a5, 12(a0) ; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a5, a1, 63 -; RV64I-NEXT: sltu a4, a0, a2 -; 
RV64I-NEXT: srai a6, a3, 63 -; RV64I-NEXT: mv a7, a4 +; RV64I-NEXT: sltu a4, a2, a0 +; RV64I-NEXT: mv a5, a4 ; RV64I-NEXT: beq a1, a3, .LBB12_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a7, a1, a3 +; RV64I-NEXT: slt a5, a3, a1 ; RV64I-NEXT: .LBB12_2: -; RV64I-NEXT: sub t0, a5, a6 -; RV64I-NEXT: sltu a7, t0, a7 -; RV64I-NEXT: sltu a5, a5, a6 -; RV64I-NEXT: sub a5, t0, a5 -; RV64I-NEXT: sub a5, a5, a7 +; RV64I-NEXT: bnez a5, .LBB12_4 +; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: sub a1, a3, a1 +; RV64I-NEXT: sub a1, a1, a4 +; RV64I-NEXT: sub a0, a2, a0 +; RV64I-NEXT: j .LBB12_5 +; RV64I-NEXT: .LBB12_4: +; RV64I-NEXT: sltu a4, a0, a2 ; RV64I-NEXT: sub a1, a1, a3 ; RV64I-NEXT: sub a1, a1, a4 ; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB12_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: .LBB12_4: +; RV64I-NEXT: .LBB12_5: ; RV64I-NEXT: snez a2, a0 ; RV64I-NEXT: add a1, a1, a2 ; RV64I-NEXT: neg a1, a1 @@ -1040,121 +987,117 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i128_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) -; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB12_2 +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw a4, 0(a2) +; RV32ZBB-NEXT: lw a5, 4(a1) +; RV32ZBB-NEXT: lw a6, 8(a1) +; RV32ZBB-NEXT: lw a7, 8(a2) +; RV32ZBB-NEXT: lw t1, 12(a1) +; RV32ZBB-NEXT: lw t2, 12(a2) +; RV32ZBB-NEXT: lw a1, 4(a2) +; RV32ZBB-NEXT: sltu t3, a7, a6 +; RV32ZBB-NEXT: mv t4, t3 +; RV32ZBB-NEXT: beq t1, t2, .LBB12_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: slt t4, t2, t1 ; RV32ZBB-NEXT: .LBB12_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 -; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB12_4 +; RV32ZBB-NEXT: sltu a2, a4, a3 +; RV32ZBB-NEXT: sltu t6, a1, a5 +; RV32ZBB-NEXT: mv t0, a2 +; RV32ZBB-NEXT: beq a5, a1, .LBB12_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: mv t0, t6 ; RV32ZBB-NEXT: .LBB12_4: ; RV32ZBB-NEXT: addi sp, sp, -16 ; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: srai t5, t0, 31 -; RV32ZBB-NEXT: xor t6, t0, a2 -; RV32ZBB-NEXT: xor s0, a7, a6 -; RV32ZBB-NEXT: or s1, s0, t6 -; RV32ZBB-NEXT: srai t6, a2, 31 -; RV32ZBB-NEXT: mv s0, t3 -; RV32ZBB-NEXT: beqz s1, .LBB12_6 +; RV32ZBB-NEXT: xor t5, t1, t2 +; RV32ZBB-NEXT: xor s0, a6, a7 +; RV32ZBB-NEXT: or t5, s0, t5 +; RV32ZBB-NEXT: beqz t5, .LBB12_6 ; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv s0, t4 +; RV32ZBB-NEXT: mv t0, t4 ; RV32ZBB-NEXT: .LBB12_6: -; RV32ZBB-NEXT: sub t4, t5, t6 -; RV32ZBB-NEXT: sltu s0, t4, s0 -; RV32ZBB-NEXT: sltu t5, t5, t6 -; RV32ZBB-NEXT: sub t6, t4, t5 -; RV32ZBB-NEXT: seqz s1, t6 -; RV32ZBB-NEXT: and s0, s1, s0 -; RV32ZBB-NEXT: sltu s0, t6, s0 -; RV32ZBB-NEXT: sltu t4, t4, t5 -; RV32ZBB-NEXT: sub t4, t6, t4 -; RV32ZBB-NEXT: sub t4, t4, s0 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 -; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 -; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 -; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB12_8 +; RV32ZBB-NEXT: 
mv t5, a2 +; RV32ZBB-NEXT: beq a1, a5, .LBB12_8 ; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: sub a6, a7, a3 +; RV32ZBB-NEXT: mv t5, t6 ; RV32ZBB-NEXT: .LBB12_8: -; RV32ZBB-NEXT: snez a3, a6 -; RV32ZBB-NEXT: add a2, a2, a3 -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a6, a6 -; RV32ZBB-NEXT: sltu a7, a6, a3 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a3, a6, a3 -; RV32ZBB-NEXT: add a1, a1, a5 +; RV32ZBB-NEXT: sltu t4, a3, a4 +; RV32ZBB-NEXT: mv t6, t4 +; RV32ZBB-NEXT: beq a5, a1, .LBB12_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: sltu t6, a5, a1 +; RV32ZBB-NEXT: .LBB12_10: +; RV32ZBB-NEXT: bnez t0, .LBB12_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sub t1, t2, t1 +; RV32ZBB-NEXT: sub a6, a7, a6 +; RV32ZBB-NEXT: sub a7, t1, t3 +; RV32ZBB-NEXT: sltu t1, a6, t5 +; RV32ZBB-NEXT: sub a7, a7, t1 +; RV32ZBB-NEXT: sub a6, a6, t5 +; RV32ZBB-NEXT: j .LBB12_13 +; RV32ZBB-NEXT: .LBB12_12: +; RV32ZBB-NEXT: sltu t3, a6, a7 +; RV32ZBB-NEXT: sub t1, t1, t2 +; RV32ZBB-NEXT: sub t1, t1, t3 +; RV32ZBB-NEXT: sub a6, a6, a7 +; RV32ZBB-NEXT: sltu a7, a6, t6 +; RV32ZBB-NEXT: sub a7, t1, a7 +; RV32ZBB-NEXT: sub a6, a6, t6 +; RV32ZBB-NEXT: .LBB12_13: +; RV32ZBB-NEXT: snez t1, a6 +; RV32ZBB-NEXT: add a7, a7, t1 +; RV32ZBB-NEXT: bnez t0, .LBB12_15 +; RV32ZBB-NEXT: # %bb.14: +; RV32ZBB-NEXT: sub a1, a1, a5 +; RV32ZBB-NEXT: sub a1, a1, a2 +; RV32ZBB-NEXT: sub a3, a4, a3 +; RV32ZBB-NEXT: j .LBB12_16 +; RV32ZBB-NEXT: .LBB12_15: +; RV32ZBB-NEXT: sub a5, a5, a1 +; RV32ZBB-NEXT: sub a1, a5, t4 +; RV32ZBB-NEXT: sub a3, a3, a4 +; RV32ZBB-NEXT: .LBB12_16: +; RV32ZBB-NEXT: or a2, a3, a1 +; RV32ZBB-NEXT: snez a2, a2 +; RV32ZBB-NEXT: neg a4, a6 +; RV32ZBB-NEXT: sltu a5, a4, a2 +; RV32ZBB-NEXT: neg a6, a7 +; RV32ZBB-NEXT: sub a5, a6, a5 +; RV32ZBB-NEXT: snez a6, a3 +; RV32ZBB-NEXT: add a1, a1, a6 ; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: sw a4, 0(a0) +; RV32ZBB-NEXT: sub a4, a4, a2 +; RV32ZBB-NEXT: neg a2, a3 +; RV32ZBB-NEXT: sw a2, 0(a0) +; RV32ZBB-NEXT: sw a4, 8(a0) ; RV32ZBB-NEXT: sw a1, 4(a0) -; RV32ZBB-NEXT: sw a3, 8(a0) -; RV32ZBB-NEXT: sw a2, 12(a0) +; RV32ZBB-NEXT: sw a5, 12(a0) ; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32ZBB-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBB-NEXT: addi sp, sp, 16 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a5, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: srai a6, a3, 63 -; RV64ZBB-NEXT: mv a7, a4 +; RV64ZBB-NEXT: sltu a4, a2, a0 +; RV64ZBB-NEXT: mv a5, a4 ; RV64ZBB-NEXT: beq a1, a3, .LBB12_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a7, a1, a3 +; RV64ZBB-NEXT: slt a5, a3, a1 ; RV64ZBB-NEXT: .LBB12_2: -; RV64ZBB-NEXT: sub t0, a5, a6 -; RV64ZBB-NEXT: sltu a7, t0, a7 -; RV64ZBB-NEXT: sltu a5, a5, a6 -; RV64ZBB-NEXT: sub a5, t0, a5 -; RV64ZBB-NEXT: sub a5, a5, a7 +; RV64ZBB-NEXT: bnez a5, .LBB12_4 +; RV64ZBB-NEXT: # %bb.3: +; RV64ZBB-NEXT: sub a1, a3, a1 +; RV64ZBB-NEXT: sub a1, a1, a4 +; RV64ZBB-NEXT: sub a0, a2, a0 +; RV64ZBB-NEXT: j .LBB12_5 +; RV64ZBB-NEXT: .LBB12_4: +; RV64ZBB-NEXT: sltu a4, a0, a2 ; RV64ZBB-NEXT: 
sub a1, a1, a3 ; RV64ZBB-NEXT: sub a1, a1, a4 ; RV64ZBB-NEXT: sub a0, a0, a2 -; RV64ZBB-NEXT: bgez a5, .LBB12_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB12_4: +; RV64ZBB-NEXT: .LBB12_5: ; RV64ZBB-NEXT: snez a2, a0 ; RV64ZBB-NEXT: add a1, a1, a2 ; RV64ZBB-NEXT: neg a1, a1 diff --git a/llvm/test/CodeGen/RISCV/abds.ll b/llvm/test/CodeGen/RISCV/abds.ll index 053a060f3862a9..3cebc1128ae850 100644 --- a/llvm/test/CodeGen/RISCV/abds.ll +++ b/llvm/test/CodeGen/RISCV/abds.ll @@ -11,10 +11,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: slli a1, a1, 24 ; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -23,10 +23,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i8: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 56 -; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: slli a1, a1, 56 ; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: slli a0, a0, 56 +; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -35,11 +35,11 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i8: ; ZBB: # %bb.0: -; ZBB-NEXT: sext.b a0, a0 ; ZBB-NEXT: sext.b a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: sext.b a0, a0 +; ZBB-NEXT: min a2, a0, a1 ; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 @@ -52,10 +52,10 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -64,10 +64,10 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i8_i16: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 56 -; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: slli a1, a1, 48 ; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: slli a0, a0, 56 +; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -76,11 +76,11 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i8_i16: ; ZBB: # %bb.0: -; ZBB-NEXT: sext.b a0, a0 ; ZBB-NEXT: sext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: sext.b a0, a0 +; ZBB-NEXT: min a2, a0, a1 ; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i16 %b to i64 @@ -93,10 +93,10 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: slli a1, a1, 24 ; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: slli a0, a0, 24 +; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -105,10 +105,10 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i8_undef: ; 
RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 56 -; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: slli a1, a1, 56 ; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: slli a0, a0, 56 +; RV64I-NEXT: srai a0, a0, 56 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -117,11 +117,11 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i8_undef: ; ZBB: # %bb.0: -; ZBB-NEXT: sext.b a0, a0 ; ZBB-NEXT: sext.b a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: sext.b a0, a0 +; ZBB-NEXT: min a2, a0, a1 ; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = sext i8 %a to i64 %bext = sext i8 %b to i64 @@ -134,10 +134,10 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -146,10 +146,10 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i16: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 48 -; RV64I-NEXT: srai a0, a0, 48 ; RV64I-NEXT: slli a1, a1, 48 ; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: slli a0, a0, 48 +; RV64I-NEXT: srai a0, a0, 48 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -158,11 +158,11 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i16: ; ZBB: # %bb.0: -; ZBB-NEXT: sext.h a0, a0 ; ZBB-NEXT: sext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: sext.h a0, a0 +; ZBB-NEXT: min a2, a0, a1 ; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 @@ -176,24 +176,20 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i16_i32: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srai a2, a0, 31 ; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: srai a3, a1, 31 -; RV32I-NEXT: sltu a4, a0, a1 -; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB4_2 +; RV32I-NEXT: blt a1, a0, .LBB4_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i16_i32: ; RV64I: # %bb.0: +; RV64I-NEXT: sext.w a1, a1 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: sext.w a1, a1 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -203,25 +199,18 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32ZBB-LABEL: abd_ext_i16_i32: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: sext.h a0, a0 -; RV32ZBB-NEXT: srai a2, a0, 31 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB4_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB4_2: +; RV32ZBB-NEXT: min a2, a0, a1 +; RV32ZBB-NEXT: max a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i16_i32: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sext.h a0, a0 ; RV64ZBB-NEXT: sext.w a1, a1 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg 
a1, a0 +; RV64ZBB-NEXT: sext.h a0, a0 +; RV64ZBB-NEXT: min a2, a0, a1 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i32 %b to i64 @@ -234,10 +223,10 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i16_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: slli a0, a0, 16 +; RV32I-NEXT: srai a0, a0, 16 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -246,10 +235,10 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i16_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 48 -; RV64I-NEXT: srai a0, a0, 48 ; RV64I-NEXT: slli a1, a1, 48 ; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: slli a0, a0, 48 +; RV64I-NEXT: srai a0, a0, 48 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -258,11 +247,11 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i16_undef: ; ZBB: # %bb.0: -; ZBB-NEXT: sext.h a0, a0 ; ZBB-NEXT: sext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 +; ZBB-NEXT: sext.h a0, a0 +; ZBB-NEXT: min a2, a0, a1 ; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = sext i16 %a to i64 %bext = sext i16 %b to i64 @@ -275,22 +264,18 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a2, a0, 31 -; RV32I-NEXT: srai a3, a1, 31 -; RV32I-NEXT: sltu a4, a0, a1 -; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB6_2 +; RV32I-NEXT: blt a1, a0, .LBB6_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB6_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: sext.w a1, a1 +; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -299,25 +284,18 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a2, a0, 31 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB6_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB6_2: +; RV32ZBB-NEXT: min a2, a0, a1 +; RV32ZBB-NEXT: max a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sext.w a0, a0 ; RV64ZBB-NEXT: sext.w a1, a1 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: sext.w a0, a0 +; RV64ZBB-NEXT: min a2, a0, a1 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -330,18 +308,14 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i32_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a2, a0, 31 ; RV32I-NEXT: slli a1, a1, 16 -; RV32I-NEXT: srai a3, a1, 31 ; RV32I-NEXT: srai a1, a1, 16 -; RV32I-NEXT: sltu a4, a0, a1 
-; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB7_2 +; RV32I-NEXT: blt a1, a0, .LBB7_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB7_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i32_i16: @@ -357,26 +331,19 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32_i16: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a2, a0, 31 ; RV32ZBB-NEXT: sext.h a1, a1 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB7_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB7_2: +; RV32ZBB-NEXT: min a2, a0, a1 +; RV32ZBB-NEXT: max a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_i16: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sext.w a0, a0 ; RV64ZBB-NEXT: sext.h a1, a1 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a2, a0, a1 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i16 %b to i64 @@ -389,22 +356,18 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a2, a0, 31 -; RV32I-NEXT: srai a3, a1, 31 -; RV32I-NEXT: sltu a4, a0, a1 -; RV32I-NEXT: sub a2, a2, a3 -; RV32I-NEXT: sub a2, a2, a4 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB8_2 +; RV32I-NEXT: blt a1, a0, .LBB8_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB8_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i32_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: sext.w a1, a1 +; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -413,25 +376,18 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a2, a0, 31 -; RV32ZBB-NEXT: srai a3, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a1 -; RV32ZBB-NEXT: sub a2, a2, a3 -; RV32ZBB-NEXT: sub a2, a2, a4 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB8_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB8_2: +; RV32ZBB-NEXT: min a2, a0, a1 +; RV32ZBB-NEXT: max a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sext.w a0, a0 ; RV64ZBB-NEXT: sext.w a1, a1 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: sext.w a0, a0 +; RV64ZBB-NEXT: min a2, a0, a1 ; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = sext i32 %a to i64 %bext = sext i32 %b to i64 @@ -444,84 +400,61 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a5, a1, 31 -; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: srai a6, a3, 31 -; RV32I-NEXT: mv a7, a4 +; RV32I-NEXT: sltu a4, a2, a0 +; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: beq a1, a3, .LBB9_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu a7, a1, a3 +; RV32I-NEXT: slt a5, a3, a1 ; RV32I-NEXT: 
.LBB9_2: -; RV32I-NEXT: sub t0, a5, a6 -; RV32I-NEXT: sltu a7, t0, a7 -; RV32I-NEXT: sltu a5, a5, a6 -; RV32I-NEXT: sub a5, t0, a5 -; RV32I-NEXT: sub a5, a5, a7 +; RV32I-NEXT: bnez a5, .LBB9_4 +; RV32I-NEXT: # %bb.3: +; RV32I-NEXT: sub a1, a3, a1 +; RV32I-NEXT: sub a1, a1, a4 +; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: ret +; RV32I-NEXT: .LBB9_4: +; RV32I-NEXT: sltu a4, a0, a2 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a1, a1, a4 ; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB9_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: .LBB9_4: ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a2, a0, 63 -; RV64I-NEXT: srai a3, a1, 63 -; RV64I-NEXT: sltu a4, a0, a1 -; RV64I-NEXT: sub a2, a2, a3 -; RV64I-NEXT: sub a2, a2, a4 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB9_2 +; RV64I-NEXT: blt a1, a0, .LBB9_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB9_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a5, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: srai a6, a3, 31 -; RV32ZBB-NEXT: mv a7, a4 +; RV32ZBB-NEXT: sltu a4, a2, a0 +; RV32ZBB-NEXT: mv a5, a4 ; RV32ZBB-NEXT: beq a1, a3, .LBB9_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a7, a1, a3 +; RV32ZBB-NEXT: slt a5, a3, a1 ; RV32ZBB-NEXT: .LBB9_2: -; RV32ZBB-NEXT: sub t0, a5, a6 -; RV32ZBB-NEXT: sltu a7, t0, a7 -; RV32ZBB-NEXT: sltu a5, a5, a6 -; RV32ZBB-NEXT: sub a5, t0, a5 -; RV32ZBB-NEXT: sub a5, a5, a7 +; RV32ZBB-NEXT: bnez a5, .LBB9_4 +; RV32ZBB-NEXT: # %bb.3: +; RV32ZBB-NEXT: sub a1, a3, a1 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sub a0, a2, a0 +; RV32ZBB-NEXT: ret +; RV32ZBB-NEXT: .LBB9_4: +; RV32ZBB-NEXT: sltu a4, a0, a2 ; RV32ZBB-NEXT: sub a1, a1, a3 ; RV32ZBB-NEXT: sub a1, a1, a4 ; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB9_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: .LBB9_4: ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i64: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a2, a0, 63 -; RV64ZBB-NEXT: srai a3, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a1 -; RV64ZBB-NEXT: sub a2, a2, a3 -; RV64ZBB-NEXT: sub a2, a2, a4 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB9_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB9_2: +; RV64ZBB-NEXT: min a2, a0, a1 +; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -534,84 +467,61 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: srai a5, a1, 31 -; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: srai a6, a3, 31 -; RV32I-NEXT: mv a7, a4 +; RV32I-NEXT: sltu a4, a2, a0 +; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: beq a1, a3, .LBB10_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu a7, a1, a3 +; RV32I-NEXT: slt a5, a3, a1 ; RV32I-NEXT: .LBB10_2: -; RV32I-NEXT: sub t0, a5, a6 -; RV32I-NEXT: sltu a7, t0, a7 -; RV32I-NEXT: sltu a5, a5, a6 -; RV32I-NEXT: sub a5, t0, a5 -; RV32I-NEXT: sub a5, a5, a7 +; RV32I-NEXT: bnez a5, .LBB10_4 +; RV32I-NEXT: # %bb.3: +; RV32I-NEXT: sub a1, a3, a1 +; RV32I-NEXT: sub a1, a1, a4 +; 
RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: ret +; RV32I-NEXT: .LBB10_4: +; RV32I-NEXT: sltu a4, a0, a2 ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a1, a1, a4 ; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB10_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: .LBB10_4: ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i64_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a2, a0, 63 -; RV64I-NEXT: srai a3, a1, 63 -; RV64I-NEXT: sltu a4, a0, a1 -; RV64I-NEXT: sub a2, a2, a3 -; RV64I-NEXT: sub a2, a2, a4 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB10_2 +; RV64I-NEXT: blt a1, a0, .LBB10_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB10_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: srai a5, a1, 31 -; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: srai a6, a3, 31 -; RV32ZBB-NEXT: mv a7, a4 +; RV32ZBB-NEXT: sltu a4, a2, a0 +; RV32ZBB-NEXT: mv a5, a4 ; RV32ZBB-NEXT: beq a1, a3, .LBB10_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a7, a1, a3 +; RV32ZBB-NEXT: slt a5, a3, a1 ; RV32ZBB-NEXT: .LBB10_2: -; RV32ZBB-NEXT: sub t0, a5, a6 -; RV32ZBB-NEXT: sltu a7, t0, a7 -; RV32ZBB-NEXT: sltu a5, a5, a6 -; RV32ZBB-NEXT: sub a5, t0, a5 -; RV32ZBB-NEXT: sub a5, a5, a7 +; RV32ZBB-NEXT: bnez a5, .LBB10_4 +; RV32ZBB-NEXT: # %bb.3: +; RV32ZBB-NEXT: sub a1, a3, a1 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sub a0, a2, a0 +; RV32ZBB-NEXT: ret +; RV32ZBB-NEXT: .LBB10_4: +; RV32ZBB-NEXT: sltu a4, a0, a2 ; RV32ZBB-NEXT: sub a1, a1, a3 ; RV32ZBB-NEXT: sub a1, a1, a4 ; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB10_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: .LBB10_4: ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i64_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a2, a0, 63 -; RV64ZBB-NEXT: srai a3, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a1 -; RV64ZBB-NEXT: sub a2, a2, a3 -; RV64ZBB-NEXT: sub a2, a2, a4 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB10_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB10_2: +; RV64ZBB-NEXT: min a2, a0, a1 +; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = sext i64 %a to i128 %bext = sext i64 %b to i128 @@ -624,214 +534,194 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a4, 4(a1) +; RV32I-NEXT: lw a6, 8(a1) +; RV32I-NEXT: lw a7, 8(a2) ; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB11_2 +; RV32I-NEXT: lw t1, 12(a2) +; RV32I-NEXT: lw a1, 4(a2) +; RV32I-NEXT: sltu a2, a7, a6 +; RV32I-NEXT: mv t4, a2 +; RV32I-NEXT: beq t0, t1, .LBB11_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: slt t4, t1, t0 ; RV32I-NEXT: .LBB11_2: -; RV32I-NEXT: sltu t2, a4, a3 +; RV32I-NEXT: sltu t2, a5, a3 +; RV32I-NEXT: sltu t5, a1, a4 ; RV32I-NEXT: mv t3, t2 -; 
RV32I-NEXT: beq a1, a5, .LBB11_4 +; RV32I-NEXT: beq a4, a1, .LBB11_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: mv t3, t5 ; RV32I-NEXT: .LBB11_4: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: srai t5, t0, 31 -; RV32I-NEXT: xor t6, t0, a2 -; RV32I-NEXT: xor s0, a7, a6 -; RV32I-NEXT: or s1, s0, t6 -; RV32I-NEXT: srai t6, a2, 31 -; RV32I-NEXT: mv s0, t3 -; RV32I-NEXT: beqz s1, .LBB11_6 +; RV32I-NEXT: xor t6, t0, t1 +; RV32I-NEXT: xor s0, a6, a7 +; RV32I-NEXT: or t6, s0, t6 +; RV32I-NEXT: beqz t6, .LBB11_6 ; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv s0, t4 +; RV32I-NEXT: mv t3, t4 ; RV32I-NEXT: .LBB11_6: -; RV32I-NEXT: sub t4, t5, t6 -; RV32I-NEXT: sltu s0, t4, s0 -; RV32I-NEXT: sltu t5, t5, t6 -; RV32I-NEXT: sub t6, t4, t5 -; RV32I-NEXT: seqz s1, t6 -; RV32I-NEXT: and s0, s1, s0 -; RV32I-NEXT: sltu s0, t6, s0 -; RV32I-NEXT: sltu t4, t4, t5 -; RV32I-NEXT: sub t4, t6, t4 -; RV32I-NEXT: sub t4, t4, s0 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 +; RV32I-NEXT: mv t4, t2 +; RV32I-NEXT: beq a1, a4, .LBB11_8 +; RV32I-NEXT: # %bb.7: +; RV32I-NEXT: mv t4, t5 +; RV32I-NEXT: .LBB11_8: +; RV32I-NEXT: sltu t5, a3, a5 +; RV32I-NEXT: mv t6, t5 +; RV32I-NEXT: beq a4, a1, .LBB11_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: sltu t6, a4, a1 +; RV32I-NEXT: .LBB11_10: +; RV32I-NEXT: bnez t3, .LBB11_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sub t0, t1, t0 ; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 +; RV32I-NEXT: sub a2, t0, a2 +; RV32I-NEXT: sltu a7, a6, t4 ; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 +; RV32I-NEXT: sub a3, a5, a3 +; RV32I-NEXT: sub a1, a1, a4 ; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB11_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: sub a6, a7, a3 -; RV32I-NEXT: .LBB11_8: -; RV32I-NEXT: sw a2, 12(a0) -; RV32I-NEXT: sw a6, 8(a0) +; RV32I-NEXT: sub a4, a6, t4 +; RV32I-NEXT: j .LBB11_13 +; RV32I-NEXT: .LBB11_12: +; RV32I-NEXT: sltu a2, a6, a7 +; RV32I-NEXT: sub t0, t0, t1 +; RV32I-NEXT: sub a2, t0, a2 +; RV32I-NEXT: sub a6, a6, a7 +; RV32I-NEXT: sltu a7, a6, t6 +; RV32I-NEXT: sub a2, a2, a7 +; RV32I-NEXT: sub a3, a3, a5 +; RV32I-NEXT: sub a4, a4, a1 +; RV32I-NEXT: sub a1, a4, t5 +; RV32I-NEXT: sub a4, a6, t6 +; RV32I-NEXT: .LBB11_13: +; RV32I-NEXT: sw a4, 8(a0) ; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw a4, 0(a0) +; RV32I-NEXT: sw a3, 0(a0) +; RV32I-NEXT: sw a2, 12(a0) ; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a5, a1, 63 -; RV64I-NEXT: sltu a4, a0, a2 -; RV64I-NEXT: srai a6, a3, 63 -; RV64I-NEXT: mv a7, a4 +; RV64I-NEXT: sltu a4, a2, a0 +; RV64I-NEXT: mv a5, a4 ; RV64I-NEXT: beq a1, a3, .LBB11_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a7, a1, a3 +; RV64I-NEXT: slt a5, a3, a1 ; RV64I-NEXT: .LBB11_2: -; RV64I-NEXT: sub t0, a5, a6 -; RV64I-NEXT: sltu a7, t0, a7 -; RV64I-NEXT: sltu a5, a5, a6 -; RV64I-NEXT: sub a5, t0, a5 -; RV64I-NEXT: sub a5, a5, a7 +; 
RV64I-NEXT: bnez a5, .LBB11_4 +; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: sub a1, a3, a1 +; RV64I-NEXT: sub a1, a1, a4 +; RV64I-NEXT: sub a0, a2, a0 +; RV64I-NEXT: ret +; RV64I-NEXT: .LBB11_4: +; RV64I-NEXT: sltu a4, a0, a2 ; RV64I-NEXT: sub a1, a1, a3 ; RV64I-NEXT: sub a1, a1, a4 ; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB11_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: .LBB11_4: ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a4, 4(a1) +; RV32ZBB-NEXT: lw a6, 8(a1) +; RV32ZBB-NEXT: lw a7, 8(a2) ; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB11_2 +; RV32ZBB-NEXT: lw t1, 12(a2) +; RV32ZBB-NEXT: lw a1, 4(a2) +; RV32ZBB-NEXT: sltu a2, a7, a6 +; RV32ZBB-NEXT: mv t4, a2 +; RV32ZBB-NEXT: beq t0, t1, .LBB11_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: slt t4, t1, t0 ; RV32ZBB-NEXT: .LBB11_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 +; RV32ZBB-NEXT: sltu t2, a5, a3 +; RV32ZBB-NEXT: sltu t5, a1, a4 ; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB11_4 +; RV32ZBB-NEXT: beq a4, a1, .LBB11_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: mv t3, t5 ; RV32ZBB-NEXT: .LBB11_4: ; RV32ZBB-NEXT: addi sp, sp, -16 ; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: srai t5, t0, 31 -; RV32ZBB-NEXT: xor t6, t0, a2 -; RV32ZBB-NEXT: xor s0, a7, a6 -; RV32ZBB-NEXT: or s1, s0, t6 -; RV32ZBB-NEXT: srai t6, a2, 31 -; RV32ZBB-NEXT: mv s0, t3 -; RV32ZBB-NEXT: beqz s1, .LBB11_6 +; RV32ZBB-NEXT: xor t6, t0, t1 +; RV32ZBB-NEXT: xor s0, a6, a7 +; RV32ZBB-NEXT: or t6, s0, t6 +; RV32ZBB-NEXT: beqz t6, .LBB11_6 ; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv s0, t4 +; RV32ZBB-NEXT: mv t3, t4 ; RV32ZBB-NEXT: .LBB11_6: -; RV32ZBB-NEXT: sub t4, t5, t6 -; RV32ZBB-NEXT: sltu s0, t4, s0 -; RV32ZBB-NEXT: sltu t5, t5, t6 -; RV32ZBB-NEXT: sub t6, t4, t5 -; RV32ZBB-NEXT: seqz s1, t6 -; RV32ZBB-NEXT: and s0, s1, s0 -; RV32ZBB-NEXT: sltu s0, t6, s0 -; RV32ZBB-NEXT: sltu t4, t4, t5 -; RV32ZBB-NEXT: sub t4, t6, t4 -; RV32ZBB-NEXT: sub t4, t4, s0 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 +; RV32ZBB-NEXT: mv t4, t2 +; RV32ZBB-NEXT: beq a1, a4, .LBB11_8 +; RV32ZBB-NEXT: # %bb.7: +; RV32ZBB-NEXT: mv t4, t5 +; RV32ZBB-NEXT: .LBB11_8: +; RV32ZBB-NEXT: sltu t5, a3, a5 +; RV32ZBB-NEXT: mv t6, t5 +; RV32ZBB-NEXT: beq a4, a1, .LBB11_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: sltu t6, a4, a1 +; RV32ZBB-NEXT: .LBB11_10: +; RV32ZBB-NEXT: bnez t3, .LBB11_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sub t0, t1, t0 ; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 +; RV32ZBB-NEXT: sub a2, t0, a2 +; RV32ZBB-NEXT: sltu a7, a6, t4 ; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 +; RV32ZBB-NEXT: sub a3, a5, a3 +; RV32ZBB-NEXT: sub a1, a1, a4 ; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB11_8 -; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; 
RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: sub a6, a7, a3 -; RV32ZBB-NEXT: .LBB11_8: -; RV32ZBB-NEXT: sw a2, 12(a0) -; RV32ZBB-NEXT: sw a6, 8(a0) +; RV32ZBB-NEXT: sub a4, a6, t4 +; RV32ZBB-NEXT: j .LBB11_13 +; RV32ZBB-NEXT: .LBB11_12: +; RV32ZBB-NEXT: sltu a2, a6, a7 +; RV32ZBB-NEXT: sub t0, t0, t1 +; RV32ZBB-NEXT: sub a2, t0, a2 +; RV32ZBB-NEXT: sub a6, a6, a7 +; RV32ZBB-NEXT: sltu a7, a6, t6 +; RV32ZBB-NEXT: sub a2, a2, a7 +; RV32ZBB-NEXT: sub a3, a3, a5 +; RV32ZBB-NEXT: sub a4, a4, a1 +; RV32ZBB-NEXT: sub a1, a4, t5 +; RV32ZBB-NEXT: sub a4, a6, t6 +; RV32ZBB-NEXT: .LBB11_13: +; RV32ZBB-NEXT: sw a4, 8(a0) ; RV32ZBB-NEXT: sw a1, 4(a0) -; RV32ZBB-NEXT: sw a4, 0(a0) +; RV32ZBB-NEXT: sw a3, 0(a0) +; RV32ZBB-NEXT: sw a2, 12(a0) ; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32ZBB-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBB-NEXT: addi sp, sp, 16 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a5, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: srai a6, a3, 63 -; RV64ZBB-NEXT: mv a7, a4 +; RV64ZBB-NEXT: sltu a4, a2, a0 +; RV64ZBB-NEXT: mv a5, a4 ; RV64ZBB-NEXT: beq a1, a3, .LBB11_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a7, a1, a3 +; RV64ZBB-NEXT: slt a5, a3, a1 ; RV64ZBB-NEXT: .LBB11_2: -; RV64ZBB-NEXT: sub t0, a5, a6 -; RV64ZBB-NEXT: sltu a7, t0, a7 -; RV64ZBB-NEXT: sltu a5, a5, a6 -; RV64ZBB-NEXT: sub a5, t0, a5 -; RV64ZBB-NEXT: sub a5, a5, a7 +; RV64ZBB-NEXT: bnez a5, .LBB11_4 +; RV64ZBB-NEXT: # %bb.3: +; RV64ZBB-NEXT: sub a1, a3, a1 +; RV64ZBB-NEXT: sub a1, a1, a4 +; RV64ZBB-NEXT: sub a0, a2, a0 +; RV64ZBB-NEXT: ret +; RV64ZBB-NEXT: .LBB11_4: +; RV64ZBB-NEXT: sltu a4, a0, a2 ; RV64ZBB-NEXT: sub a1, a1, a3 ; RV64ZBB-NEXT: sub a1, a1, a4 ; RV64ZBB-NEXT: sub a0, a0, a2 -; RV64ZBB-NEXT: bgez a5, .LBB11_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: .LBB11_4: ; RV64ZBB-NEXT: ret %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -844,214 +734,194 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a4, 4(a1) +; RV32I-NEXT: lw a6, 8(a1) +; RV32I-NEXT: lw a7, 8(a2) ; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB12_2 +; RV32I-NEXT: lw t1, 12(a2) +; RV32I-NEXT: lw a1, 4(a2) +; RV32I-NEXT: sltu a2, a7, a6 +; RV32I-NEXT: mv t4, a2 +; RV32I-NEXT: beq t0, t1, .LBB12_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: slt t4, t1, t0 ; RV32I-NEXT: .LBB12_2: -; RV32I-NEXT: sltu t2, a4, a3 +; RV32I-NEXT: sltu t2, a5, a3 +; RV32I-NEXT: sltu t5, a1, a4 ; RV32I-NEXT: mv t3, t2 -; RV32I-NEXT: beq a1, a5, .LBB12_4 +; RV32I-NEXT: beq a4, a1, .LBB12_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: mv t3, t5 ; RV32I-NEXT: .LBB12_4: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw s0, 
12(sp) # 4-byte Folded Spill -; RV32I-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32I-NEXT: srai t5, t0, 31 -; RV32I-NEXT: xor t6, t0, a2 -; RV32I-NEXT: xor s0, a7, a6 -; RV32I-NEXT: or s1, s0, t6 -; RV32I-NEXT: srai t6, a2, 31 -; RV32I-NEXT: mv s0, t3 -; RV32I-NEXT: beqz s1, .LBB12_6 +; RV32I-NEXT: xor t6, t0, t1 +; RV32I-NEXT: xor s0, a6, a7 +; RV32I-NEXT: or t6, s0, t6 +; RV32I-NEXT: beqz t6, .LBB12_6 ; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv s0, t4 +; RV32I-NEXT: mv t3, t4 ; RV32I-NEXT: .LBB12_6: -; RV32I-NEXT: sub t4, t5, t6 -; RV32I-NEXT: sltu s0, t4, s0 -; RV32I-NEXT: sltu t5, t5, t6 -; RV32I-NEXT: sub t6, t4, t5 -; RV32I-NEXT: seqz s1, t6 -; RV32I-NEXT: and s0, s1, s0 -; RV32I-NEXT: sltu s0, t6, s0 -; RV32I-NEXT: sltu t4, t4, t5 -; RV32I-NEXT: sub t4, t6, t4 -; RV32I-NEXT: sub t4, t4, s0 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 +; RV32I-NEXT: mv t4, t2 +; RV32I-NEXT: beq a1, a4, .LBB12_8 +; RV32I-NEXT: # %bb.7: +; RV32I-NEXT: mv t4, t5 +; RV32I-NEXT: .LBB12_8: +; RV32I-NEXT: sltu t5, a3, a5 +; RV32I-NEXT: mv t6, t5 +; RV32I-NEXT: beq a4, a1, .LBB12_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: sltu t6, a4, a1 +; RV32I-NEXT: .LBB12_10: +; RV32I-NEXT: bnez t3, .LBB12_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sub t0, t1, t0 ; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 +; RV32I-NEXT: sub a2, t0, a2 +; RV32I-NEXT: sltu a7, a6, t4 ; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 +; RV32I-NEXT: sub a3, a5, a3 +; RV32I-NEXT: sub a1, a1, a4 ; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB12_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: sub a6, a7, a3 -; RV32I-NEXT: .LBB12_8: -; RV32I-NEXT: sw a2, 12(a0) -; RV32I-NEXT: sw a6, 8(a0) +; RV32I-NEXT: sub a4, a6, t4 +; RV32I-NEXT: j .LBB12_13 +; RV32I-NEXT: .LBB12_12: +; RV32I-NEXT: sltu a2, a6, a7 +; RV32I-NEXT: sub t0, t0, t1 +; RV32I-NEXT: sub a2, t0, a2 +; RV32I-NEXT: sub a6, a6, a7 +; RV32I-NEXT: sltu a7, a6, t6 +; RV32I-NEXT: sub a2, a2, a7 +; RV32I-NEXT: sub a3, a3, a5 +; RV32I-NEXT: sub a4, a4, a1 +; RV32I-NEXT: sub a1, a4, t5 +; RV32I-NEXT: sub a4, a6, t6 +; RV32I-NEXT: .LBB12_13: +; RV32I-NEXT: sw a4, 8(a0) ; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw a4, 0(a0) +; RV32I-NEXT: sw a3, 0(a0) +; RV32I-NEXT: sw a2, 12(a0) ; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: srai a5, a1, 63 -; RV64I-NEXT: sltu a4, a0, a2 -; RV64I-NEXT: srai a6, a3, 63 -; RV64I-NEXT: mv a7, a4 +; RV64I-NEXT: sltu a4, a2, a0 +; RV64I-NEXT: mv a5, a4 ; RV64I-NEXT: beq a1, a3, .LBB12_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a7, a1, a3 +; RV64I-NEXT: slt a5, a3, a1 ; RV64I-NEXT: .LBB12_2: -; RV64I-NEXT: sub t0, a5, a6 -; RV64I-NEXT: sltu a7, t0, a7 -; RV64I-NEXT: sltu a5, a5, a6 -; RV64I-NEXT: sub a5, t0, a5 -; RV64I-NEXT: sub a5, a5, a7 +; RV64I-NEXT: bnez a5, .LBB12_4 +; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: sub a1, a3, a1 +; RV64I-NEXT: sub a1, a1, a4 +; RV64I-NEXT: sub a0, a2, a0 +; RV64I-NEXT: ret +; RV64I-NEXT: .LBB12_4: +; RV64I-NEXT: sltu a4, a0, a2 ; 
RV64I-NEXT: sub a1, a1, a3 ; RV64I-NEXT: sub a1, a1, a4 ; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB12_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: .LBB12_4: ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i128_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a4, 4(a1) +; RV32ZBB-NEXT: lw a6, 8(a1) +; RV32ZBB-NEXT: lw a7, 8(a2) ; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB12_2 +; RV32ZBB-NEXT: lw t1, 12(a2) +; RV32ZBB-NEXT: lw a1, 4(a2) +; RV32ZBB-NEXT: sltu a2, a7, a6 +; RV32ZBB-NEXT: mv t4, a2 +; RV32ZBB-NEXT: beq t0, t1, .LBB12_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: slt t4, t1, t0 ; RV32ZBB-NEXT: .LBB12_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 +; RV32ZBB-NEXT: sltu t2, a5, a3 +; RV32ZBB-NEXT: sltu t5, a1, a4 ; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB12_4 +; RV32ZBB-NEXT: beq a4, a1, .LBB12_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: mv t3, t5 ; RV32ZBB-NEXT: .LBB12_4: ; RV32ZBB-NEXT: addi sp, sp, -16 ; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: sw s1, 8(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: srai t5, t0, 31 -; RV32ZBB-NEXT: xor t6, t0, a2 -; RV32ZBB-NEXT: xor s0, a7, a6 -; RV32ZBB-NEXT: or s1, s0, t6 -; RV32ZBB-NEXT: srai t6, a2, 31 -; RV32ZBB-NEXT: mv s0, t3 -; RV32ZBB-NEXT: beqz s1, .LBB12_6 +; RV32ZBB-NEXT: xor t6, t0, t1 +; RV32ZBB-NEXT: xor s0, a6, a7 +; RV32ZBB-NEXT: or t6, s0, t6 +; RV32ZBB-NEXT: beqz t6, .LBB12_6 ; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv s0, t4 +; RV32ZBB-NEXT: mv t3, t4 ; RV32ZBB-NEXT: .LBB12_6: -; RV32ZBB-NEXT: sub t4, t5, t6 -; RV32ZBB-NEXT: sltu s0, t4, s0 -; RV32ZBB-NEXT: sltu t5, t5, t6 -; RV32ZBB-NEXT: sub t6, t4, t5 -; RV32ZBB-NEXT: seqz s1, t6 -; RV32ZBB-NEXT: and s0, s1, s0 -; RV32ZBB-NEXT: sltu s0, t6, s0 -; RV32ZBB-NEXT: sltu t4, t4, t5 -; RV32ZBB-NEXT: sub t4, t6, t4 -; RV32ZBB-NEXT: sub t4, t4, s0 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 +; RV32ZBB-NEXT: mv t4, t2 +; RV32ZBB-NEXT: beq a1, a4, .LBB12_8 +; RV32ZBB-NEXT: # %bb.7: +; RV32ZBB-NEXT: mv t4, t5 +; RV32ZBB-NEXT: .LBB12_8: +; RV32ZBB-NEXT: sltu t5, a3, a5 +; RV32ZBB-NEXT: mv t6, t5 +; RV32ZBB-NEXT: beq a4, a1, .LBB12_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: sltu t6, a4, a1 +; RV32ZBB-NEXT: .LBB12_10: +; RV32ZBB-NEXT: bnez t3, .LBB12_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sub t0, t1, t0 ; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 +; RV32ZBB-NEXT: sub a2, t0, a2 +; RV32ZBB-NEXT: sltu a7, a6, t4 ; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 +; RV32ZBB-NEXT: sub a3, a5, a3 +; RV32ZBB-NEXT: sub a1, a1, a4 ; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB12_8 -; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: neg a4, a4 -; 
RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: sub a6, a7, a3 -; RV32ZBB-NEXT: .LBB12_8: -; RV32ZBB-NEXT: sw a2, 12(a0) -; RV32ZBB-NEXT: sw a6, 8(a0) +; RV32ZBB-NEXT: sub a4, a6, t4 +; RV32ZBB-NEXT: j .LBB12_13 +; RV32ZBB-NEXT: .LBB12_12: +; RV32ZBB-NEXT: sltu a2, a6, a7 +; RV32ZBB-NEXT: sub t0, t0, t1 +; RV32ZBB-NEXT: sub a2, t0, a2 +; RV32ZBB-NEXT: sub a6, a6, a7 +; RV32ZBB-NEXT: sltu a7, a6, t6 +; RV32ZBB-NEXT: sub a2, a2, a7 +; RV32ZBB-NEXT: sub a3, a3, a5 +; RV32ZBB-NEXT: sub a4, a4, a1 +; RV32ZBB-NEXT: sub a1, a4, t5 +; RV32ZBB-NEXT: sub a4, a6, t6 +; RV32ZBB-NEXT: .LBB12_13: +; RV32ZBB-NEXT: sw a4, 8(a0) ; RV32ZBB-NEXT: sw a1, 4(a0) -; RV32ZBB-NEXT: sw a4, 0(a0) +; RV32ZBB-NEXT: sw a3, 0(a0) +; RV32ZBB-NEXT: sw a2, 12(a0) ; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32ZBB-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32ZBB-NEXT: addi sp, sp, 16 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: srai a5, a1, 63 -; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: srai a6, a3, 63 -; RV64ZBB-NEXT: mv a7, a4 +; RV64ZBB-NEXT: sltu a4, a2, a0 +; RV64ZBB-NEXT: mv a5, a4 ; RV64ZBB-NEXT: beq a1, a3, .LBB12_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a7, a1, a3 +; RV64ZBB-NEXT: slt a5, a3, a1 ; RV64ZBB-NEXT: .LBB12_2: -; RV64ZBB-NEXT: sub t0, a5, a6 -; RV64ZBB-NEXT: sltu a7, t0, a7 -; RV64ZBB-NEXT: sltu a5, a5, a6 -; RV64ZBB-NEXT: sub a5, t0, a5 -; RV64ZBB-NEXT: sub a5, a5, a7 +; RV64ZBB-NEXT: bnez a5, .LBB12_4 +; RV64ZBB-NEXT: # %bb.3: +; RV64ZBB-NEXT: sub a1, a3, a1 +; RV64ZBB-NEXT: sub a1, a1, a4 +; RV64ZBB-NEXT: sub a0, a2, a0 +; RV64ZBB-NEXT: ret +; RV64ZBB-NEXT: .LBB12_4: +; RV64ZBB-NEXT: sltu a4, a0, a2 ; RV64ZBB-NEXT: sub a1, a1, a3 ; RV64ZBB-NEXT: sub a1, a1, a4 ; RV64ZBB-NEXT: sub a0, a0, a2 -; RV64ZBB-NEXT: bgez a5, .LBB12_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: .LBB12_4: ; RV64ZBB-NEXT: ret %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -1072,18 +942,10 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; RV32I-NEXT: srai a1, a1, 24 ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: bge a0, a1, .LBB13_3 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: bge a1, a0, .LBB13_4 -; RV32I-NEXT: .LBB13_2: -; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: ret -; RV32I-NEXT: .LBB13_3: -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt a1, a0, .LBB13_2 -; RV32I-NEXT: .LBB13_4: -; RV32I-NEXT: sub a0, a1, a2 +; RV32I-NEXT: sub a0, a0, a1 +; RV32I-NEXT: srai a1, a0, 31 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i8: @@ -1092,18 +954,10 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { ; RV64I-NEXT: srai a1, a1, 56 ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: bge a0, a1, .LBB13_3 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: bge a1, a0, .LBB13_4 -; RV64I-NEXT: .LBB13_2: -; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: ret -; RV64I-NEXT: .LBB13_3: -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt a1, a0, .LBB13_2 -; RV64I-NEXT: .LBB13_4: -; RV64I-NEXT: sub a0, a1, a2 +; RV64I-NEXT: sub a0, a0, a1 +; RV64I-NEXT: srai a1, a0, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; ZBB-LABEL: abd_minmax_i8: @@ -1127,18 +981,10 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; RV32I-NEXT: srai a1, a1, 16 ; 
RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: bge a0, a1, .LBB14_3 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: bge a1, a0, .LBB14_4 -; RV32I-NEXT: .LBB14_2: -; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: ret -; RV32I-NEXT: .LBB14_3: -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt a1, a0, .LBB14_2 -; RV32I-NEXT: .LBB14_4: -; RV32I-NEXT: sub a0, a1, a2 +; RV32I-NEXT: sub a0, a0, a1 +; RV32I-NEXT: srai a1, a0, 31 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i16: @@ -1147,18 +993,10 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: srai a1, a1, 48 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: bge a0, a1, .LBB14_3 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: bge a1, a0, .LBB14_4 -; RV64I-NEXT: .LBB14_2: -; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: ret -; RV64I-NEXT: .LBB14_3: -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt a1, a0, .LBB14_2 -; RV64I-NEXT: .LBB14_4: -; RV64I-NEXT: sub a0, a1, a2 +; RV64I-NEXT: sub a0, a0, a1 +; RV64I-NEXT: srai a1, a0, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; ZBB-LABEL: abd_minmax_i16: @@ -1178,36 +1016,22 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_minmax_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: bge a0, a1, .LBB15_3 +; RV32I-NEXT: blt a1, a0, .LBB15_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: bge a1, a0, .LBB15_4 -; RV32I-NEXT: .LBB15_2: -; RV32I-NEXT: sub a0, a0, a2 +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: ret -; RV32I-NEXT: .LBB15_3: -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt a1, a0, .LBB15_2 -; RV32I-NEXT: .LBB15_4: -; RV32I-NEXT: sub a0, a1, a2 +; RV32I-NEXT: .LBB15_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: sext.w a1, a1 ; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: bge a0, a1, .LBB15_3 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: bge a1, a0, .LBB15_4 -; RV64I-NEXT: .LBB15_2: -; RV64I-NEXT: subw a0, a0, a2 -; RV64I-NEXT: ret -; RV64I-NEXT: .LBB15_3: -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt a1, a0, .LBB15_2 -; RV64I-NEXT: .LBB15_4: -; RV64I-NEXT: subw a0, a1, a2 +; RV64I-NEXT: sub a0, a0, a1 +; RV64I-NEXT: srai a1, a0, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_minmax_i32: @@ -1223,7 +1047,7 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; RV64ZBB-NEXT: sext.w a0, a0 ; RV64ZBB-NEXT: min a2, a0, a1 ; RV64ZBB-NEXT: max a0, a0, a1 -; RV64ZBB-NEXT: subw a0, a0, a2 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %min = call i32 @llvm.smin.i32(i32 %a, i32 %b) %max = call i32 @llvm.smax.i32(i32 %a, i32 %b) @@ -1234,86 +1058,54 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_minmax_i64: ; RV32I: # %bb.0: +; RV32I-NEXT: sltu a4, a2, a0 +; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: beq a1, a3, .LBB16_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: slt a6, a1, a3 -; RV32I-NEXT: j .LBB16_3 +; RV32I-NEXT: slt a5, a3, a1 ; RV32I-NEXT: .LBB16_2: -; RV32I-NEXT: sltu a6, a0, a2 -; RV32I-NEXT: .LBB16_3: -; RV32I-NEXT: mv a4, a1 -; RV32I-NEXT: mv a5, a0 -; RV32I-NEXT: bnez a6, .LBB16_5 -; RV32I-NEXT: # %bb.4: -; RV32I-NEXT: mv a4, a3 -; RV32I-NEXT: mv a5, a2 -; RV32I-NEXT: .LBB16_5: -; RV32I-NEXT: beq a1, a3, .LBB16_7 -; RV32I-NEXT: # 
%bb.6: -; RV32I-NEXT: slt a6, a3, a1 -; RV32I-NEXT: beqz a6, .LBB16_8 -; RV32I-NEXT: j .LBB16_9 -; RV32I-NEXT: .LBB16_7: -; RV32I-NEXT: sltu a6, a2, a0 -; RV32I-NEXT: bnez a6, .LBB16_9 -; RV32I-NEXT: .LBB16_8: -; RV32I-NEXT: mv a1, a3 -; RV32I-NEXT: mv a0, a2 -; RV32I-NEXT: .LBB16_9: -; RV32I-NEXT: sltu a2, a0, a5 +; RV32I-NEXT: bnez a5, .LBB16_4 +; RV32I-NEXT: # %bb.3: +; RV32I-NEXT: sub a1, a3, a1 +; RV32I-NEXT: sub a1, a1, a4 +; RV32I-NEXT: sub a0, a2, a0 +; RV32I-NEXT: ret +; RV32I-NEXT: .LBB16_4: +; RV32I-NEXT: sltu a4, a0, a2 +; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sub a1, a1, a4 -; RV32I-NEXT: sub a1, a1, a2 -; RV32I-NEXT: sub a0, a0, a5 +; RV32I-NEXT: sub a0, a0, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: bge a0, a1, .LBB16_3 +; RV64I-NEXT: blt a1, a0, .LBB16_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: bge a1, a0, .LBB16_4 -; RV64I-NEXT: .LBB16_2: -; RV64I-NEXT: sub a0, a0, a2 +; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret -; RV64I-NEXT: .LBB16_3: -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt a1, a0, .LBB16_2 -; RV64I-NEXT: .LBB16_4: -; RV64I-NEXT: sub a0, a1, a2 +; RV64I-NEXT: .LBB16_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_minmax_i64: ; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: sltu a4, a2, a0 +; RV32ZBB-NEXT: mv a5, a4 ; RV32ZBB-NEXT: beq a1, a3, .LBB16_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: slt a6, a1, a3 -; RV32ZBB-NEXT: j .LBB16_3 +; RV32ZBB-NEXT: slt a5, a3, a1 ; RV32ZBB-NEXT: .LBB16_2: -; RV32ZBB-NEXT: sltu a6, a0, a2 -; RV32ZBB-NEXT: .LBB16_3: -; RV32ZBB-NEXT: mv a4, a1 -; RV32ZBB-NEXT: mv a5, a0 -; RV32ZBB-NEXT: bnez a6, .LBB16_5 -; RV32ZBB-NEXT: # %bb.4: -; RV32ZBB-NEXT: mv a4, a3 -; RV32ZBB-NEXT: mv a5, a2 -; RV32ZBB-NEXT: .LBB16_5: -; RV32ZBB-NEXT: beq a1, a3, .LBB16_7 -; RV32ZBB-NEXT: # %bb.6: -; RV32ZBB-NEXT: slt a6, a3, a1 -; RV32ZBB-NEXT: beqz a6, .LBB16_8 -; RV32ZBB-NEXT: j .LBB16_9 -; RV32ZBB-NEXT: .LBB16_7: -; RV32ZBB-NEXT: sltu a6, a2, a0 -; RV32ZBB-NEXT: bnez a6, .LBB16_9 -; RV32ZBB-NEXT: .LBB16_8: -; RV32ZBB-NEXT: mv a1, a3 -; RV32ZBB-NEXT: mv a0, a2 -; RV32ZBB-NEXT: .LBB16_9: -; RV32ZBB-NEXT: sltu a2, a0, a5 +; RV32ZBB-NEXT: bnez a5, .LBB16_4 +; RV32ZBB-NEXT: # %bb.3: +; RV32ZBB-NEXT: sub a1, a3, a1 ; RV32ZBB-NEXT: sub a1, a1, a4 -; RV32ZBB-NEXT: sub a1, a1, a2 -; RV32ZBB-NEXT: sub a0, a0, a5 +; RV32ZBB-NEXT: sub a0, a2, a0 +; RV32ZBB-NEXT: ret +; RV32ZBB-NEXT: .LBB16_4: +; RV32ZBB-NEXT: sltu a4, a0, a2 +; RV32ZBB-NEXT: sub a1, a1, a3 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_minmax_i64: @@ -1331,256 +1123,194 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_minmax_i128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a6, 4(a2) -; RV32I-NEXT: lw a3, 4(a1) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a4, 4(a1) +; RV32I-NEXT: lw a6, 8(a1) ; RV32I-NEXT: lw a7, 8(a2) -; RV32I-NEXT: lw t0, 12(a2) -; RV32I-NEXT: lw a5, 12(a1) -; RV32I-NEXT: lw a4, 8(a1) -; RV32I-NEXT: beq a5, t0, .LBB17_2 +; RV32I-NEXT: lw t0, 12(a1) +; RV32I-NEXT: lw t1, 12(a2) +; RV32I-NEXT: lw a1, 4(a2) +; RV32I-NEXT: sltu a2, a7, a6 +; RV32I-NEXT: mv t4, a2 +; RV32I-NEXT: beq t0, t1, .LBB17_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: slt t1, a5, t0 -; RV32I-NEXT: j .LBB17_3 +; RV32I-NEXT: slt t4, t1, t0 ; RV32I-NEXT: .LBB17_2: -; RV32I-NEXT: sltu t1, a4, a7 -; RV32I-NEXT: .LBB17_3: -; RV32I-NEXT: lw t2, 0(a2) -; RV32I-NEXT: lw a1, 0(a1) 
-; RV32I-NEXT: beq a3, a6, .LBB17_5 -; RV32I-NEXT: # %bb.4: -; RV32I-NEXT: sltu t6, a3, a6 -; RV32I-NEXT: j .LBB17_6 -; RV32I-NEXT: .LBB17_5: -; RV32I-NEXT: sltu t6, a1, t2 +; RV32I-NEXT: sltu t2, a5, a3 +; RV32I-NEXT: sltu t5, a1, a4 +; RV32I-NEXT: mv t3, t2 +; RV32I-NEXT: beq a4, a1, .LBB17_4 +; RV32I-NEXT: # %bb.3: +; RV32I-NEXT: mv t3, t5 +; RV32I-NEXT: .LBB17_4: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: xor t6, t0, t1 +; RV32I-NEXT: xor s0, a6, a7 +; RV32I-NEXT: or t6, s0, t6 +; RV32I-NEXT: beqz t6, .LBB17_6 +; RV32I-NEXT: # %bb.5: +; RV32I-NEXT: mv t3, t4 ; RV32I-NEXT: .LBB17_6: -; RV32I-NEXT: xor a2, a5, t0 -; RV32I-NEXT: xor t3, a4, a7 -; RV32I-NEXT: or t5, t3, a2 -; RV32I-NEXT: beqz t5, .LBB17_8 +; RV32I-NEXT: mv t4, t2 +; RV32I-NEXT: beq a1, a4, .LBB17_8 ; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: mv t6, t1 +; RV32I-NEXT: mv t4, t5 ; RV32I-NEXT: .LBB17_8: -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: mv t1, a3 -; RV32I-NEXT: mv t4, a5 -; RV32I-NEXT: mv t3, a4 -; RV32I-NEXT: bnez t6, .LBB17_10 +; RV32I-NEXT: sltu t5, a3, a5 +; RV32I-NEXT: mv t6, t5 +; RV32I-NEXT: beq a4, a1, .LBB17_10 ; RV32I-NEXT: # %bb.9: -; RV32I-NEXT: mv a2, t2 -; RV32I-NEXT: mv t1, a6 -; RV32I-NEXT: mv t4, t0 -; RV32I-NEXT: mv t3, a7 +; RV32I-NEXT: sltu t6, a4, a1 ; RV32I-NEXT: .LBB17_10: -; RV32I-NEXT: beq a5, t0, .LBB17_12 +; RV32I-NEXT: bnez t3, .LBB17_12 ; RV32I-NEXT: # %bb.11: -; RV32I-NEXT: slt t6, t0, a5 +; RV32I-NEXT: sub t0, t1, t0 +; RV32I-NEXT: sub a6, a7, a6 +; RV32I-NEXT: sub a2, t0, a2 +; RV32I-NEXT: sltu a7, a6, t4 +; RV32I-NEXT: sub a2, a2, a7 +; RV32I-NEXT: sub a3, a5, a3 +; RV32I-NEXT: sub a1, a1, a4 +; RV32I-NEXT: sub a1, a1, t2 +; RV32I-NEXT: sub a4, a6, t4 ; RV32I-NEXT: j .LBB17_13 ; RV32I-NEXT: .LBB17_12: -; RV32I-NEXT: sltu t6, a7, a4 -; RV32I-NEXT: .LBB17_13: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: beq a3, a6, .LBB17_15 -; RV32I-NEXT: # %bb.14: -; RV32I-NEXT: sltu s0, a6, a3 -; RV32I-NEXT: bnez t5, .LBB17_16 -; RV32I-NEXT: j .LBB17_17 -; RV32I-NEXT: .LBB17_15: -; RV32I-NEXT: sltu s0, t2, a1 -; RV32I-NEXT: beqz t5, .LBB17_17 -; RV32I-NEXT: .LBB17_16: -; RV32I-NEXT: mv s0, t6 -; RV32I-NEXT: .LBB17_17: -; RV32I-NEXT: bnez s0, .LBB17_19 -; RV32I-NEXT: # %bb.18: -; RV32I-NEXT: mv a1, t2 -; RV32I-NEXT: mv a3, a6 -; RV32I-NEXT: mv a5, t0 -; RV32I-NEXT: mv a4, a7 -; RV32I-NEXT: .LBB17_19: -; RV32I-NEXT: sltu a6, a4, t3 -; RV32I-NEXT: sub a7, a5, t4 -; RV32I-NEXT: sltu a5, a1, a2 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: mv a7, a5 -; RV32I-NEXT: beq a3, t1, .LBB17_21 -; RV32I-NEXT: # %bb.20: -; RV32I-NEXT: sltu a7, a3, t1 -; RV32I-NEXT: .LBB17_21: -; RV32I-NEXT: sub a4, a4, t3 -; RV32I-NEXT: sltu t0, a4, a7 -; RV32I-NEXT: sub a6, a6, t0 -; RV32I-NEXT: sub a4, a4, a7 -; RV32I-NEXT: sub a3, a3, t1 +; RV32I-NEXT: sltu a2, a6, a7 +; RV32I-NEXT: sub t0, t0, t1 +; RV32I-NEXT: sub a2, t0, a2 +; RV32I-NEXT: sub a6, a6, a7 +; RV32I-NEXT: sltu a7, a6, t6 +; RV32I-NEXT: sub a2, a2, a7 ; RV32I-NEXT: sub a3, a3, a5 -; RV32I-NEXT: sub a1, a1, a2 -; RV32I-NEXT: sw a1, 0(a0) -; RV32I-NEXT: sw a3, 4(a0) +; RV32I-NEXT: sub a4, a4, a1 +; RV32I-NEXT: sub a1, a4, t5 +; RV32I-NEXT: sub a4, a6, t6 +; RV32I-NEXT: .LBB17_13: ; RV32I-NEXT: sw a4, 8(a0) -; RV32I-NEXT: sw a6, 12(a0) +; RV32I-NEXT: sw a1, 4(a0) +; RV32I-NEXT: sw a3, 0(a0) +; RV32I-NEXT: sw a2, 12(a0) ; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i128: ; RV64I: # %bb.0: +; 
RV64I-NEXT: sltu a4, a2, a0 +; RV64I-NEXT: mv a5, a4 ; RV64I-NEXT: beq a1, a3, .LBB17_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: slt a6, a1, a3 -; RV64I-NEXT: j .LBB17_3 +; RV64I-NEXT: slt a5, a3, a1 ; RV64I-NEXT: .LBB17_2: -; RV64I-NEXT: sltu a6, a0, a2 -; RV64I-NEXT: .LBB17_3: -; RV64I-NEXT: mv a4, a1 -; RV64I-NEXT: mv a5, a0 -; RV64I-NEXT: bnez a6, .LBB17_5 -; RV64I-NEXT: # %bb.4: -; RV64I-NEXT: mv a4, a3 -; RV64I-NEXT: mv a5, a2 -; RV64I-NEXT: .LBB17_5: -; RV64I-NEXT: beq a1, a3, .LBB17_7 -; RV64I-NEXT: # %bb.6: -; RV64I-NEXT: slt a6, a3, a1 -; RV64I-NEXT: beqz a6, .LBB17_8 -; RV64I-NEXT: j .LBB17_9 -; RV64I-NEXT: .LBB17_7: -; RV64I-NEXT: sltu a6, a2, a0 -; RV64I-NEXT: bnez a6, .LBB17_9 -; RV64I-NEXT: .LBB17_8: -; RV64I-NEXT: mv a1, a3 -; RV64I-NEXT: mv a0, a2 -; RV64I-NEXT: .LBB17_9: -; RV64I-NEXT: sltu a2, a0, a5 +; RV64I-NEXT: bnez a5, .LBB17_4 +; RV64I-NEXT: # %bb.3: +; RV64I-NEXT: sub a1, a3, a1 ; RV64I-NEXT: sub a1, a1, a4 -; RV64I-NEXT: sub a1, a1, a2 -; RV64I-NEXT: sub a0, a0, a5 +; RV64I-NEXT: sub a0, a2, a0 +; RV64I-NEXT: ret +; RV64I-NEXT: .LBB17_4: +; RV64I-NEXT: sltu a4, a0, a2 +; RV64I-NEXT: sub a1, a1, a3 +; RV64I-NEXT: sub a1, a1, a4 +; RV64I-NEXT: sub a0, a0, a2 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_minmax_i128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a6, 4(a2) -; RV32ZBB-NEXT: lw a3, 4(a1) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a4, 4(a1) +; RV32ZBB-NEXT: lw a6, 8(a1) ; RV32ZBB-NEXT: lw a7, 8(a2) -; RV32ZBB-NEXT: lw t0, 12(a2) -; RV32ZBB-NEXT: lw a5, 12(a1) -; RV32ZBB-NEXT: lw a4, 8(a1) -; RV32ZBB-NEXT: beq a5, t0, .LBB17_2 +; RV32ZBB-NEXT: lw t0, 12(a1) +; RV32ZBB-NEXT: lw t1, 12(a2) +; RV32ZBB-NEXT: lw a1, 4(a2) +; RV32ZBB-NEXT: sltu a2, a7, a6 +; RV32ZBB-NEXT: mv t4, a2 +; RV32ZBB-NEXT: beq t0, t1, .LBB17_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: slt t1, a5, t0 -; RV32ZBB-NEXT: j .LBB17_3 +; RV32ZBB-NEXT: slt t4, t1, t0 ; RV32ZBB-NEXT: .LBB17_2: -; RV32ZBB-NEXT: sltu t1, a4, a7 -; RV32ZBB-NEXT: .LBB17_3: -; RV32ZBB-NEXT: lw t2, 0(a2) -; RV32ZBB-NEXT: lw a1, 0(a1) -; RV32ZBB-NEXT: beq a3, a6, .LBB17_5 -; RV32ZBB-NEXT: # %bb.4: -; RV32ZBB-NEXT: sltu t6, a3, a6 -; RV32ZBB-NEXT: j .LBB17_6 -; RV32ZBB-NEXT: .LBB17_5: -; RV32ZBB-NEXT: sltu t6, a1, t2 +; RV32ZBB-NEXT: sltu t2, a5, a3 +; RV32ZBB-NEXT: sltu t5, a1, a4 +; RV32ZBB-NEXT: mv t3, t2 +; RV32ZBB-NEXT: beq a4, a1, .LBB17_4 +; RV32ZBB-NEXT: # %bb.3: +; RV32ZBB-NEXT: mv t3, t5 +; RV32ZBB-NEXT: .LBB17_4: +; RV32ZBB-NEXT: addi sp, sp, -16 +; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill +; RV32ZBB-NEXT: xor t6, t0, t1 +; RV32ZBB-NEXT: xor s0, a6, a7 +; RV32ZBB-NEXT: or t6, s0, t6 +; RV32ZBB-NEXT: beqz t6, .LBB17_6 +; RV32ZBB-NEXT: # %bb.5: +; RV32ZBB-NEXT: mv t3, t4 ; RV32ZBB-NEXT: .LBB17_6: -; RV32ZBB-NEXT: xor a2, a5, t0 -; RV32ZBB-NEXT: xor t3, a4, a7 -; RV32ZBB-NEXT: or t5, t3, a2 -; RV32ZBB-NEXT: beqz t5, .LBB17_8 +; RV32ZBB-NEXT: mv t4, t2 +; RV32ZBB-NEXT: beq a1, a4, .LBB17_8 ; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: mv t6, t1 +; RV32ZBB-NEXT: mv t4, t5 ; RV32ZBB-NEXT: .LBB17_8: -; RV32ZBB-NEXT: mv a2, a1 -; RV32ZBB-NEXT: mv t1, a3 -; RV32ZBB-NEXT: mv t4, a5 -; RV32ZBB-NEXT: mv t3, a4 -; RV32ZBB-NEXT: bnez t6, .LBB17_10 +; RV32ZBB-NEXT: sltu t5, a3, a5 +; RV32ZBB-NEXT: mv t6, t5 +; RV32ZBB-NEXT: beq a4, a1, .LBB17_10 ; RV32ZBB-NEXT: # %bb.9: -; RV32ZBB-NEXT: mv a2, t2 -; RV32ZBB-NEXT: mv t1, a6 -; RV32ZBB-NEXT: mv t4, t0 -; RV32ZBB-NEXT: mv t3, a7 +; RV32ZBB-NEXT: sltu t6, a4, a1 ; RV32ZBB-NEXT: .LBB17_10: -; RV32ZBB-NEXT: beq a5, t0, .LBB17_12 +; RV32ZBB-NEXT: bnez 
t3, .LBB17_12 ; RV32ZBB-NEXT: # %bb.11: -; RV32ZBB-NEXT: slt t6, t0, a5 +; RV32ZBB-NEXT: sub t0, t1, t0 +; RV32ZBB-NEXT: sub a6, a7, a6 +; RV32ZBB-NEXT: sub a2, t0, a2 +; RV32ZBB-NEXT: sltu a7, a6, t4 +; RV32ZBB-NEXT: sub a2, a2, a7 +; RV32ZBB-NEXT: sub a3, a5, a3 +; RV32ZBB-NEXT: sub a1, a1, a4 +; RV32ZBB-NEXT: sub a1, a1, t2 +; RV32ZBB-NEXT: sub a4, a6, t4 ; RV32ZBB-NEXT: j .LBB17_13 ; RV32ZBB-NEXT: .LBB17_12: -; RV32ZBB-NEXT: sltu t6, a7, a4 -; RV32ZBB-NEXT: .LBB17_13: -; RV32ZBB-NEXT: addi sp, sp, -16 -; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: beq a3, a6, .LBB17_15 -; RV32ZBB-NEXT: # %bb.14: -; RV32ZBB-NEXT: sltu s0, a6, a3 -; RV32ZBB-NEXT: bnez t5, .LBB17_16 -; RV32ZBB-NEXT: j .LBB17_17 -; RV32ZBB-NEXT: .LBB17_15: -; RV32ZBB-NEXT: sltu s0, t2, a1 -; RV32ZBB-NEXT: beqz t5, .LBB17_17 -; RV32ZBB-NEXT: .LBB17_16: -; RV32ZBB-NEXT: mv s0, t6 -; RV32ZBB-NEXT: .LBB17_17: -; RV32ZBB-NEXT: bnez s0, .LBB17_19 -; RV32ZBB-NEXT: # %bb.18: -; RV32ZBB-NEXT: mv a1, t2 -; RV32ZBB-NEXT: mv a3, a6 -; RV32ZBB-NEXT: mv a5, t0 -; RV32ZBB-NEXT: mv a4, a7 -; RV32ZBB-NEXT: .LBB17_19: -; RV32ZBB-NEXT: sltu a6, a4, t3 -; RV32ZBB-NEXT: sub a7, a5, t4 -; RV32ZBB-NEXT: sltu a5, a1, a2 -; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: mv a7, a5 -; RV32ZBB-NEXT: beq a3, t1, .LBB17_21 -; RV32ZBB-NEXT: # %bb.20: -; RV32ZBB-NEXT: sltu a7, a3, t1 -; RV32ZBB-NEXT: .LBB17_21: -; RV32ZBB-NEXT: sub a4, a4, t3 -; RV32ZBB-NEXT: sltu t0, a4, a7 -; RV32ZBB-NEXT: sub a6, a6, t0 -; RV32ZBB-NEXT: sub a4, a4, a7 -; RV32ZBB-NEXT: sub a3, a3, t1 +; RV32ZBB-NEXT: sltu a2, a6, a7 +; RV32ZBB-NEXT: sub t0, t0, t1 +; RV32ZBB-NEXT: sub a2, t0, a2 +; RV32ZBB-NEXT: sub a6, a6, a7 +; RV32ZBB-NEXT: sltu a7, a6, t6 +; RV32ZBB-NEXT: sub a2, a2, a7 ; RV32ZBB-NEXT: sub a3, a3, a5 -; RV32ZBB-NEXT: sub a1, a1, a2 -; RV32ZBB-NEXT: sw a1, 0(a0) -; RV32ZBB-NEXT: sw a3, 4(a0) +; RV32ZBB-NEXT: sub a4, a4, a1 +; RV32ZBB-NEXT: sub a1, a4, t5 +; RV32ZBB-NEXT: sub a4, a6, t6 +; RV32ZBB-NEXT: .LBB17_13: ; RV32ZBB-NEXT: sw a4, 8(a0) -; RV32ZBB-NEXT: sw a6, 12(a0) +; RV32ZBB-NEXT: sw a1, 4(a0) +; RV32ZBB-NEXT: sw a3, 0(a0) +; RV32ZBB-NEXT: sw a2, 12(a0) ; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32ZBB-NEXT: addi sp, sp, 16 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_minmax_i128: ; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: sltu a4, a2, a0 +; RV64ZBB-NEXT: mv a5, a4 ; RV64ZBB-NEXT: beq a1, a3, .LBB17_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: slt a6, a1, a3 -; RV64ZBB-NEXT: j .LBB17_3 +; RV64ZBB-NEXT: slt a5, a3, a1 ; RV64ZBB-NEXT: .LBB17_2: -; RV64ZBB-NEXT: sltu a6, a0, a2 -; RV64ZBB-NEXT: .LBB17_3: -; RV64ZBB-NEXT: mv a4, a1 -; RV64ZBB-NEXT: mv a5, a0 -; RV64ZBB-NEXT: bnez a6, .LBB17_5 -; RV64ZBB-NEXT: # %bb.4: -; RV64ZBB-NEXT: mv a4, a3 -; RV64ZBB-NEXT: mv a5, a2 -; RV64ZBB-NEXT: .LBB17_5: -; RV64ZBB-NEXT: beq a1, a3, .LBB17_7 -; RV64ZBB-NEXT: # %bb.6: -; RV64ZBB-NEXT: slt a6, a3, a1 -; RV64ZBB-NEXT: beqz a6, .LBB17_8 -; RV64ZBB-NEXT: j .LBB17_9 -; RV64ZBB-NEXT: .LBB17_7: -; RV64ZBB-NEXT: sltu a6, a2, a0 -; RV64ZBB-NEXT: bnez a6, .LBB17_9 -; RV64ZBB-NEXT: .LBB17_8: -; RV64ZBB-NEXT: mv a1, a3 -; RV64ZBB-NEXT: mv a0, a2 -; RV64ZBB-NEXT: .LBB17_9: -; RV64ZBB-NEXT: sltu a2, a0, a5 +; RV64ZBB-NEXT: bnez a5, .LBB17_4 +; RV64ZBB-NEXT: # %bb.3: +; RV64ZBB-NEXT: sub a1, a3, a1 +; RV64ZBB-NEXT: sub a1, a1, a4 +; RV64ZBB-NEXT: sub a0, a2, a0 +; RV64ZBB-NEXT: ret +; RV64ZBB-NEXT: .LBB17_4: +; RV64ZBB-NEXT: sltu a4, a0, a2 +; RV64ZBB-NEXT: sub a1, a1, a3 ; RV64ZBB-NEXT: sub a1, a1, a4 -; RV64ZBB-NEXT: sub a1, a1, a2 -; RV64ZBB-NEXT: sub a0, 
a0, a5 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %min = call i128 @llvm.smin.i128(i128 %a, i128 %b) %max = call i128 @llvm.smax.i128(i128 %a, i128 %b) diff --git a/llvm/test/CodeGen/RISCV/abdu-neg.ll b/llvm/test/CodeGen/RISCV/abdu-neg.ll index c74e0c2d9af161..6b121af7e4e84f 100644 --- a/llvm/test/CodeGen/RISCV/abdu-neg.ll +++ b/llvm/test/CodeGen/RISCV/abdu-neg.ll @@ -11,8 +11,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: andi a1, a1, 255 +; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -29,14 +29,23 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i8: -; ZBB: # %bb.0: -; ZBB-NEXT: andi a0, a0, 255 -; ZBB-NEXT: andi a1, a1, 255 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i8: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: andi a1, a1, 255 +; RV32ZBB-NEXT: andi a0, a0, 255 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i8: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: andi a0, a0, 255 +; RV64ZBB-NEXT: andi a1, a1, 255 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 %sub = sub i64 %aext, %bext @@ -49,9 +58,9 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srli a1, a1, 16 +; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -69,14 +78,23 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i8_i16: -; ZBB: # %bb.0: -; ZBB-NEXT: andi a0, a0, 255 -; ZBB-NEXT: zext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i8_i16: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: zext.h a1, a1 +; RV32ZBB-NEXT: andi a0, a0, 255 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i8_i16: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: andi a0, a0, 255 +; RV64ZBB-NEXT: zext.h a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i16 %b to i64 %sub = sub i64 %aext, %bext @@ -89,8 +107,8 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: andi a1, a1, 255 +; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -107,14 +125,23 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i8_undef: -; ZBB: # %bb.0: -; ZBB-NEXT: andi a0, a0, 255 -; ZBB-NEXT: andi a1, a1, 255 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i8_undef: +; RV32ZBB: # %bb.0: +; 
RV32ZBB-NEXT: andi a1, a1, 255 +; RV32ZBB-NEXT: andi a0, a0, 255 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i8_undef: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: andi a0, a0, 255 +; RV64ZBB-NEXT: andi a1, a1, 255 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 %sub = sub i64 %aext, %bext @@ -129,8 +156,8 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 16 ; RV32I-NEXT: addi a2, a2, -1 -; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -149,14 +176,23 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i16: -; ZBB: # %bb.0: -; ZBB-NEXT: zext.h a0, a0 -; ZBB-NEXT: zext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i16: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: zext.h a1, a1 +; RV32ZBB-NEXT: zext.h a0, a0 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i16: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: zext.h a0, a0 +; RV64ZBB-NEXT: zext.h a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 %sub = sub i64 %aext, %bext @@ -171,13 +207,13 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srli a0, a0, 16 -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB4_2 +; RV32I-NEXT: bltu a1, a0, .LBB4_2 ; RV32I-NEXT: # %bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -196,14 +232,9 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32ZBB-LABEL: abd_ext_i16_i32: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: zext.h a0, a0 -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB4_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB4_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i16_i32: @@ -229,8 +260,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 16 ; RV32I-NEXT: addi a2, a2, -1 -; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -249,14 +280,23 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret ; -; ZBB-LABEL: abd_ext_i16_undef: -; ZBB: # %bb.0: -; ZBB-NEXT: zext.h a0, a0 -; ZBB-NEXT: zext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: min a0, a0, a1 -; ZBB-NEXT: ret +; RV32ZBB-LABEL: abd_ext_i16_undef: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: zext.h a1, a1 +; RV32ZBB-NEXT: zext.h a0, a0 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; 
RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 +; RV32ZBB-NEXT: ret +; +; RV64ZBB-LABEL: abd_ext_i16_undef: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: zext.h a0, a0 +; RV64ZBB-NEXT: zext.h a1, a1 +; RV64ZBB-NEXT: sub a0, a0, a1 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: min a0, a0, a1 +; RV64ZBB-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 %sub = sub i64 %aext, %bext @@ -269,13 +309,13 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB6_2 +; RV32I-NEXT: bltu a1, a0, .LBB6_2 ; RV32I-NEXT: # %bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB6_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -293,14 +333,9 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB6_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB6_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32: @@ -327,13 +362,13 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srli a1, a1, 16 -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB7_2 +; RV32I-NEXT: bltu a1, a0, .LBB7_2 ; RV32I-NEXT: # %bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB7_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -352,14 +387,9 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; RV32ZBB-LABEL: abd_ext_i32_i16: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: zext.h a1, a1 -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB7_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB7_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_i16: @@ -383,13 +413,13 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB8_2 +; RV32I-NEXT: bltu a1, a0, .LBB8_2 ; RV32I-NEXT: # %bb.1: +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB8_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: ret ; @@ -407,14 +437,9 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB8_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB8_2: -; RV32ZBB-NEXT: neg a0, a0 +; RV32ZBB-NEXT: maxu a2, a0, a1 +; RV32ZBB-NEXT: minu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_undef: 
@@ -440,22 +465,23 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: mv a5, a4 -; RV32I-NEXT: beq a1, a3, .LBB9_2 +; RV32I-NEXT: sub a3, a1, a3 +; RV32I-NEXT: sub a3, a3, a4 +; RV32I-NEXT: sub a2, a0, a2 +; RV32I-NEXT: beq a3, a1, .LBB9_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu a5, a1, a3 +; RV32I-NEXT: sltu a0, a1, a3 +; RV32I-NEXT: j .LBB9_3 ; RV32I-NEXT: .LBB9_2: -; RV32I-NEXT: neg a5, a5 -; RV32I-NEXT: sub a1, a1, a3 +; RV32I-NEXT: sltu a0, a0, a2 +; RV32I-NEXT: .LBB9_3: +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: xor a2, a2, a1 +; RV32I-NEXT: sltu a4, a2, a1 +; RV32I-NEXT: xor a1, a3, a1 +; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: sub a1, a1, a4 -; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB9_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: .LBB9_4: +; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: snez a2, a0 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 @@ -464,35 +490,36 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: sltu a2, a0, a1 -; RV64I-NEXT: neg a2, a2 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB9_2 +; RV64I-NEXT: bltu a1, a0, .LBB9_2 ; RV64I-NEXT: # %bb.1: +; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB9_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: mv a5, a4 -; RV32ZBB-NEXT: beq a1, a3, .LBB9_2 +; RV32ZBB-NEXT: sub a3, a1, a3 +; RV32ZBB-NEXT: sub a3, a3, a4 +; RV32ZBB-NEXT: sub a2, a0, a2 +; RV32ZBB-NEXT: beq a3, a1, .LBB9_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a5, a1, a3 +; RV32ZBB-NEXT: sltu a0, a1, a3 +; RV32ZBB-NEXT: j .LBB9_3 ; RV32ZBB-NEXT: .LBB9_2: -; RV32ZBB-NEXT: neg a5, a5 -; RV32ZBB-NEXT: sub a1, a1, a3 +; RV32ZBB-NEXT: sltu a0, a0, a2 +; RV32ZBB-NEXT: .LBB9_3: +; RV32ZBB-NEXT: neg a1, a0 +; RV32ZBB-NEXT: xor a2, a2, a1 +; RV32ZBB-NEXT: sltu a4, a2, a1 +; RV32ZBB-NEXT: xor a1, a3, a1 +; RV32ZBB-NEXT: add a1, a1, a0 ; RV32ZBB-NEXT: sub a1, a1, a4 -; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB9_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB9_4: +; RV32ZBB-NEXT: add a0, a2, a0 ; RV32ZBB-NEXT: snez a2, a0 ; RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 @@ -501,14 +528,9 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; ; RV64ZBB-LABEL: abd_ext_i64: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sltu a2, a0, a1 -; RV64ZBB-NEXT: neg a2, a2 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB9_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB9_2: -; RV64ZBB-NEXT: neg a0, a0 +; RV64ZBB-NEXT: maxu a2, a0, a1 +; RV64ZBB-NEXT: minu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -523,22 +545,23 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64_undef: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: mv a5, a4 -; RV32I-NEXT: beq a1, a3, .LBB10_2 +; RV32I-NEXT: sub a3, a1, a3 +; RV32I-NEXT: sub a3, a3, a4 +; RV32I-NEXT: sub a2, a0, a2 +; RV32I-NEXT: beq a3, a1, .LBB10_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: 
sltu a5, a1, a3 +; RV32I-NEXT: sltu a0, a1, a3 +; RV32I-NEXT: j .LBB10_3 ; RV32I-NEXT: .LBB10_2: -; RV32I-NEXT: neg a5, a5 -; RV32I-NEXT: sub a1, a1, a3 +; RV32I-NEXT: sltu a0, a0, a2 +; RV32I-NEXT: .LBB10_3: +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: xor a2, a2, a1 +; RV32I-NEXT: sltu a4, a2, a1 +; RV32I-NEXT: xor a1, a3, a1 +; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: sub a1, a1, a4 -; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB10_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: .LBB10_4: +; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: snez a2, a0 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 @@ -547,35 +570,36 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i64_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: sltu a2, a0, a1 -; RV64I-NEXT: neg a2, a2 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB10_2 +; RV64I-NEXT: bltu a1, a0, .LBB10_2 ; RV64I-NEXT: # %bb.1: +; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB10_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64_undef: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: mv a5, a4 -; RV32ZBB-NEXT: beq a1, a3, .LBB10_2 +; RV32ZBB-NEXT: sub a3, a1, a3 +; RV32ZBB-NEXT: sub a3, a3, a4 +; RV32ZBB-NEXT: sub a2, a0, a2 +; RV32ZBB-NEXT: beq a3, a1, .LBB10_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a5, a1, a3 +; RV32ZBB-NEXT: sltu a0, a1, a3 +; RV32ZBB-NEXT: j .LBB10_3 ; RV32ZBB-NEXT: .LBB10_2: -; RV32ZBB-NEXT: neg a5, a5 -; RV32ZBB-NEXT: sub a1, a1, a3 +; RV32ZBB-NEXT: sltu a0, a0, a2 +; RV32ZBB-NEXT: .LBB10_3: +; RV32ZBB-NEXT: neg a1, a0 +; RV32ZBB-NEXT: xor a2, a2, a1 +; RV32ZBB-NEXT: sltu a4, a2, a1 +; RV32ZBB-NEXT: xor a1, a3, a1 +; RV32ZBB-NEXT: add a1, a1, a0 ; RV32ZBB-NEXT: sub a1, a1, a4 -; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB10_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB10_4: +; RV32ZBB-NEXT: add a0, a2, a0 ; RV32ZBB-NEXT: snez a2, a0 ; RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 @@ -584,14 +608,9 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; ; RV64ZBB-LABEL: abd_ext_i64_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sltu a2, a0, a1 -; RV64ZBB-NEXT: neg a2, a2 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB10_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB10_2: -; RV64ZBB-NEXT: neg a0, a0 +; RV64ZBB-NEXT: maxu a2, a0, a1 +; RV64ZBB-NEXT: minu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -605,98 +624,110 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) -; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB11_2 +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw t2, 12(a2) +; RV32I-NEXT: lw t1, 8(a2) +; RV32I-NEXT: lw a4, 8(a1) +; RV32I-NEXT: lw a6, 12(a1) +; RV32I-NEXT: lw a7, 4(a2) +; 
RV32I-NEXT: lw t0, 4(a1) +; RV32I-NEXT: sltu a1, a4, t1 +; RV32I-NEXT: sub a2, a6, t2 +; RV32I-NEXT: sltu t2, a3, a5 +; RV32I-NEXT: sub a1, a2, a1 +; RV32I-NEXT: mv a2, t2 +; RV32I-NEXT: beq t0, a7, .LBB11_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: sltu a2, t0, a7 ; RV32I-NEXT: .LBB11_2: -; RV32I-NEXT: sltu t2, a4, a3 -; RV32I-NEXT: mv t3, t2 -; RV32I-NEXT: beq a1, a5, .LBB11_4 +; RV32I-NEXT: sub t1, a4, t1 +; RV32I-NEXT: sltu t3, t1, a2 +; RV32I-NEXT: sub a1, a1, t3 +; RV32I-NEXT: sub a2, t1, a2 +; RV32I-NEXT: beq a1, a6, .LBB11_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: sltu t1, a6, a1 +; RV32I-NEXT: j .LBB11_5 ; RV32I-NEXT: .LBB11_4: -; RV32I-NEXT: xor t5, t0, a2 -; RV32I-NEXT: xor t6, a7, a6 -; RV32I-NEXT: or t6, t6, t5 -; RV32I-NEXT: mv t5, t3 -; RV32I-NEXT: beqz t6, .LBB11_6 -; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv t5, t4 -; RV32I-NEXT: .LBB11_6: -; RV32I-NEXT: neg t4, t5 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 -; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB11_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: sub a6, a7, a3 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: sltu t1, a4, a2 +; RV32I-NEXT: .LBB11_5: +; RV32I-NEXT: sub a7, t0, a7 +; RV32I-NEXT: sub a7, a7, t2 +; RV32I-NEXT: sub a5, a3, a5 +; RV32I-NEXT: beq a7, t0, .LBB11_7 +; RV32I-NEXT: # %bb.6: +; RV32I-NEXT: sltu a3, t0, a7 +; RV32I-NEXT: j .LBB11_8 +; RV32I-NEXT: .LBB11_7: +; RV32I-NEXT: sltu a3, a3, a5 ; RV32I-NEXT: .LBB11_8: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: sub a3, a7, a3 -; RV32I-NEXT: add a1, a1, a5 +; RV32I-NEXT: xor a6, a1, a6 +; RV32I-NEXT: xor a4, a2, a4 +; RV32I-NEXT: or a4, a4, a6 +; RV32I-NEXT: beqz a4, .LBB11_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: mv a3, t1 +; RV32I-NEXT: .LBB11_10: +; RV32I-NEXT: neg t0, a3 +; RV32I-NEXT: xor a5, a5, t0 +; RV32I-NEXT: sltu t2, a5, t0 +; RV32I-NEXT: xor t3, a7, t0 +; RV32I-NEXT: add a4, t3, a3 +; RV32I-NEXT: sub a4, a4, t2 +; RV32I-NEXT: snez t1, a4 +; RV32I-NEXT: add a5, a5, a3 +; RV32I-NEXT: snez a6, a5 +; RV32I-NEXT: or t1, a6, t1 +; RV32I-NEXT: beqz a7, .LBB11_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sltu t2, t3, t0 +; RV32I-NEXT: .LBB11_12: +; RV32I-NEXT: xor a2, a2, t0 +; RV32I-NEXT: add a7, a2, a3 +; RV32I-NEXT: sub t3, a7, t2 +; RV32I-NEXT: neg t4, t3 +; RV32I-NEXT: sltu t5, t4, t1 +; RV32I-NEXT: sltu a2, a2, t0 +; RV32I-NEXT: xor a1, a1, t0 +; RV32I-NEXT: add a1, a1, a3 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: sltu a2, a7, t2 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: snez a2, t3 +; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a4, a4 +; RV32I-NEXT: sub a1, a1, t5 +; RV32I-NEXT: sub a2, t4, t1 +; RV32I-NEXT: add a4, a4, a6 +; RV32I-NEXT: neg a3, a4 +; RV32I-NEXT: neg a4, a5 ; RV32I-NEXT: sw a4, 0(a0) -; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw 
a3, 8(a0) -; RV32I-NEXT: sw a2, 12(a0) +; RV32I-NEXT: sw a3, 4(a0) +; RV32I-NEXT: sw a2, 8(a0) +; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a4, a0, a2 -; RV64I-NEXT: mv a5, a4 -; RV64I-NEXT: beq a1, a3, .LBB11_2 +; RV64I-NEXT: sub a3, a1, a3 +; RV64I-NEXT: sub a3, a3, a4 +; RV64I-NEXT: sub a2, a0, a2 +; RV64I-NEXT: beq a3, a1, .LBB11_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a5, a1, a3 +; RV64I-NEXT: sltu a0, a1, a3 +; RV64I-NEXT: j .LBB11_3 ; RV64I-NEXT: .LBB11_2: -; RV64I-NEXT: neg a5, a5 -; RV64I-NEXT: sub a1, a1, a3 +; RV64I-NEXT: sltu a0, a0, a2 +; RV64I-NEXT: .LBB11_3: +; RV64I-NEXT: neg a1, a0 +; RV64I-NEXT: xor a2, a2, a1 +; RV64I-NEXT: sltu a4, a2, a1 +; RV64I-NEXT: xor a1, a3, a1 +; RV64I-NEXT: add a1, a1, a0 ; RV64I-NEXT: sub a1, a1, a4 -; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB11_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: .LBB11_4: +; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: snez a2, a0 ; RV64I-NEXT: add a1, a1, a2 ; RV64I-NEXT: neg a1, a1 @@ -705,98 +736,110 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) -; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB11_2 +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw t2, 12(a2) +; RV32ZBB-NEXT: lw t1, 8(a2) +; RV32ZBB-NEXT: lw a4, 8(a1) +; RV32ZBB-NEXT: lw a6, 12(a1) +; RV32ZBB-NEXT: lw a7, 4(a2) +; RV32ZBB-NEXT: lw t0, 4(a1) +; RV32ZBB-NEXT: sltu a1, a4, t1 +; RV32ZBB-NEXT: sub a2, a6, t2 +; RV32ZBB-NEXT: sltu t2, a3, a5 +; RV32ZBB-NEXT: sub a1, a2, a1 +; RV32ZBB-NEXT: mv a2, t2 +; RV32ZBB-NEXT: beq t0, a7, .LBB11_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: sltu a2, t0, a7 ; RV32ZBB-NEXT: .LBB11_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 -; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB11_4 +; RV32ZBB-NEXT: sub t1, a4, t1 +; RV32ZBB-NEXT: sltu t3, t1, a2 +; RV32ZBB-NEXT: sub a1, a1, t3 +; RV32ZBB-NEXT: sub a2, t1, a2 +; RV32ZBB-NEXT: beq a1, a6, .LBB11_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: sltu t1, a6, a1 +; RV32ZBB-NEXT: j .LBB11_5 ; RV32ZBB-NEXT: .LBB11_4: -; RV32ZBB-NEXT: xor t5, t0, a2 -; RV32ZBB-NEXT: xor t6, a7, a6 -; RV32ZBB-NEXT: or t6, t6, t5 -; RV32ZBB-NEXT: mv t5, t3 -; RV32ZBB-NEXT: beqz t6, .LBB11_6 -; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv t5, t4 -; RV32ZBB-NEXT: .LBB11_6: -; RV32ZBB-NEXT: neg t4, t5 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 -; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 -; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 -; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB11_8 -; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: sub a6, a7, a3 -; RV32ZBB-NEXT: neg a4, a4 -; 
RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 +; RV32ZBB-NEXT: sltu t1, a4, a2 +; RV32ZBB-NEXT: .LBB11_5: +; RV32ZBB-NEXT: sub a7, t0, a7 +; RV32ZBB-NEXT: sub a7, a7, t2 +; RV32ZBB-NEXT: sub a5, a3, a5 +; RV32ZBB-NEXT: beq a7, t0, .LBB11_7 +; RV32ZBB-NEXT: # %bb.6: +; RV32ZBB-NEXT: sltu a3, t0, a7 +; RV32ZBB-NEXT: j .LBB11_8 +; RV32ZBB-NEXT: .LBB11_7: +; RV32ZBB-NEXT: sltu a3, a3, a5 ; RV32ZBB-NEXT: .LBB11_8: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: sub a3, a7, a3 -; RV32ZBB-NEXT: add a1, a1, a5 +; RV32ZBB-NEXT: xor a6, a1, a6 +; RV32ZBB-NEXT: xor a4, a2, a4 +; RV32ZBB-NEXT: or a4, a4, a6 +; RV32ZBB-NEXT: beqz a4, .LBB11_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: mv a3, t1 +; RV32ZBB-NEXT: .LBB11_10: +; RV32ZBB-NEXT: neg t0, a3 +; RV32ZBB-NEXT: xor a5, a5, t0 +; RV32ZBB-NEXT: sltu t2, a5, t0 +; RV32ZBB-NEXT: xor t3, a7, t0 +; RV32ZBB-NEXT: add a4, t3, a3 +; RV32ZBB-NEXT: sub a4, a4, t2 +; RV32ZBB-NEXT: snez t1, a4 +; RV32ZBB-NEXT: add a5, a5, a3 +; RV32ZBB-NEXT: snez a6, a5 +; RV32ZBB-NEXT: or t1, a6, t1 +; RV32ZBB-NEXT: beqz a7, .LBB11_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sltu t2, t3, t0 +; RV32ZBB-NEXT: .LBB11_12: +; RV32ZBB-NEXT: xor a2, a2, t0 +; RV32ZBB-NEXT: add a7, a2, a3 +; RV32ZBB-NEXT: sub t3, a7, t2 +; RV32ZBB-NEXT: neg t4, t3 +; RV32ZBB-NEXT: sltu t5, t4, t1 +; RV32ZBB-NEXT: sltu a2, a2, t0 +; RV32ZBB-NEXT: xor a1, a1, t0 +; RV32ZBB-NEXT: add a1, a1, a3 +; RV32ZBB-NEXT: sub a1, a1, a2 +; RV32ZBB-NEXT: sltu a2, a7, t2 +; RV32ZBB-NEXT: sub a1, a1, a2 +; RV32ZBB-NEXT: snez a2, t3 +; RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a4, a4 +; RV32ZBB-NEXT: sub a1, a1, t5 +; RV32ZBB-NEXT: sub a2, t4, t1 +; RV32ZBB-NEXT: add a4, a4, a6 +; RV32ZBB-NEXT: neg a3, a4 +; RV32ZBB-NEXT: neg a4, a5 ; RV32ZBB-NEXT: sw a4, 0(a0) -; RV32ZBB-NEXT: sw a1, 4(a0) -; RV32ZBB-NEXT: sw a3, 8(a0) -; RV32ZBB-NEXT: sw a2, 12(a0) +; RV32ZBB-NEXT: sw a3, 4(a0) +; RV32ZBB-NEXT: sw a2, 8(a0) +; RV32ZBB-NEXT: sw a1, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: mv a5, a4 -; RV64ZBB-NEXT: beq a1, a3, .LBB11_2 +; RV64ZBB-NEXT: sub a3, a1, a3 +; RV64ZBB-NEXT: sub a3, a3, a4 +; RV64ZBB-NEXT: sub a2, a0, a2 +; RV64ZBB-NEXT: beq a3, a1, .LBB11_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a5, a1, a3 +; RV64ZBB-NEXT: sltu a0, a1, a3 +; RV64ZBB-NEXT: j .LBB11_3 ; RV64ZBB-NEXT: .LBB11_2: -; RV64ZBB-NEXT: neg a5, a5 -; RV64ZBB-NEXT: sub a1, a1, a3 +; RV64ZBB-NEXT: sltu a0, a0, a2 +; RV64ZBB-NEXT: .LBB11_3: +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: xor a2, a2, a1 +; RV64ZBB-NEXT: sltu a4, a2, a1 +; RV64ZBB-NEXT: xor a1, a3, a1 +; RV64ZBB-NEXT: add a1, a1, a0 ; RV64ZBB-NEXT: sub a1, a1, a4 -; RV64ZBB-NEXT: sub a0, a0, a2 -; RV64ZBB-NEXT: bgez a5, .LBB11_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB11_4: +; RV64ZBB-NEXT: add a0, a2, a0 ; RV64ZBB-NEXT: snez a2, a0 ; RV64ZBB-NEXT: add a1, a1, a2 ; RV64ZBB-NEXT: neg a1, a1 @@ -814,98 +857,110 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: 
lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) -; RV32I-NEXT: lw t0, 12(a1) -; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB12_2 +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw t2, 12(a2) +; RV32I-NEXT: lw t1, 8(a2) +; RV32I-NEXT: lw a4, 8(a1) +; RV32I-NEXT: lw a6, 12(a1) +; RV32I-NEXT: lw a7, 4(a2) +; RV32I-NEXT: lw t0, 4(a1) +; RV32I-NEXT: sltu a1, a4, t1 +; RV32I-NEXT: sub a2, a6, t2 +; RV32I-NEXT: sltu t2, a3, a5 +; RV32I-NEXT: sub a1, a2, a1 +; RV32I-NEXT: mv a2, t2 +; RV32I-NEXT: beq t0, a7, .LBB12_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: sltu a2, t0, a7 ; RV32I-NEXT: .LBB12_2: -; RV32I-NEXT: sltu t2, a4, a3 -; RV32I-NEXT: mv t3, t2 -; RV32I-NEXT: beq a1, a5, .LBB12_4 +; RV32I-NEXT: sub t1, a4, t1 +; RV32I-NEXT: sltu t3, t1, a2 +; RV32I-NEXT: sub a1, a1, t3 +; RV32I-NEXT: sub a2, t1, a2 +; RV32I-NEXT: beq a1, a6, .LBB12_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: sltu t1, a6, a1 +; RV32I-NEXT: j .LBB12_5 ; RV32I-NEXT: .LBB12_4: -; RV32I-NEXT: xor t5, t0, a2 -; RV32I-NEXT: xor t6, a7, a6 -; RV32I-NEXT: or t6, t6, t5 -; RV32I-NEXT: mv t5, t3 -; RV32I-NEXT: beqz t6, .LBB12_6 -; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv t5, t4 -; RV32I-NEXT: .LBB12_6: -; RV32I-NEXT: neg t4, t5 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 -; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB12_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: sub a6, a7, a3 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: sltu t1, a4, a2 +; RV32I-NEXT: .LBB12_5: +; RV32I-NEXT: sub a7, t0, a7 +; RV32I-NEXT: sub a7, a7, t2 +; RV32I-NEXT: sub a5, a3, a5 +; RV32I-NEXT: beq a7, t0, .LBB12_7 +; RV32I-NEXT: # %bb.6: +; RV32I-NEXT: sltu a3, t0, a7 +; RV32I-NEXT: j .LBB12_8 +; RV32I-NEXT: .LBB12_7: +; RV32I-NEXT: sltu a3, a3, a5 ; RV32I-NEXT: .LBB12_8: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: sub a3, a7, a3 -; RV32I-NEXT: add a1, a1, a5 +; RV32I-NEXT: xor a6, a1, a6 +; RV32I-NEXT: xor a4, a2, a4 +; RV32I-NEXT: or a4, a4, a6 +; RV32I-NEXT: beqz a4, .LBB12_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: mv a3, t1 +; RV32I-NEXT: .LBB12_10: +; RV32I-NEXT: neg t0, a3 +; RV32I-NEXT: xor a5, a5, t0 +; RV32I-NEXT: sltu t2, a5, t0 +; RV32I-NEXT: xor t3, a7, t0 +; RV32I-NEXT: add a4, t3, a3 +; RV32I-NEXT: sub a4, a4, t2 +; RV32I-NEXT: snez t1, a4 +; RV32I-NEXT: add a5, a5, a3 +; RV32I-NEXT: snez a6, a5 +; RV32I-NEXT: or t1, a6, t1 +; RV32I-NEXT: beqz a7, .LBB12_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sltu t2, t3, t0 +; RV32I-NEXT: .LBB12_12: +; RV32I-NEXT: xor a2, a2, t0 +; RV32I-NEXT: add a7, a2, a3 +; RV32I-NEXT: sub t3, a7, t2 +; RV32I-NEXT: neg t4, t3 +; RV32I-NEXT: sltu t5, t4, t1 +; 
RV32I-NEXT: sltu a2, a2, t0 +; RV32I-NEXT: xor a1, a1, t0 +; RV32I-NEXT: add a1, a1, a3 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: sltu a2, a7, t2 +; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: snez a2, t3 +; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a4, a4 +; RV32I-NEXT: sub a1, a1, t5 +; RV32I-NEXT: sub a2, t4, t1 +; RV32I-NEXT: add a4, a4, a6 +; RV32I-NEXT: neg a3, a4 +; RV32I-NEXT: neg a4, a5 ; RV32I-NEXT: sw a4, 0(a0) -; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw a3, 8(a0) -; RV32I-NEXT: sw a2, 12(a0) +; RV32I-NEXT: sw a3, 4(a0) +; RV32I-NEXT: sw a2, 8(a0) +; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128_undef: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a4, a0, a2 -; RV64I-NEXT: mv a5, a4 -; RV64I-NEXT: beq a1, a3, .LBB12_2 +; RV64I-NEXT: sub a3, a1, a3 +; RV64I-NEXT: sub a3, a3, a4 +; RV64I-NEXT: sub a2, a0, a2 +; RV64I-NEXT: beq a3, a1, .LBB12_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a5, a1, a3 +; RV64I-NEXT: sltu a0, a1, a3 +; RV64I-NEXT: j .LBB12_3 ; RV64I-NEXT: .LBB12_2: -; RV64I-NEXT: neg a5, a5 -; RV64I-NEXT: sub a1, a1, a3 +; RV64I-NEXT: sltu a0, a0, a2 +; RV64I-NEXT: .LBB12_3: +; RV64I-NEXT: neg a1, a0 +; RV64I-NEXT: xor a2, a2, a1 +; RV64I-NEXT: sltu a4, a2, a1 +; RV64I-NEXT: xor a1, a3, a1 +; RV64I-NEXT: add a1, a1, a0 ; RV64I-NEXT: sub a1, a1, a4 -; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB12_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: .LBB12_4: +; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: snez a2, a0 ; RV64I-NEXT: add a1, a1, a2 ; RV64I-NEXT: neg a1, a1 @@ -914,98 +969,110 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i128_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) -; RV32ZBB-NEXT: lw t0, 12(a1) -; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB12_2 +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw t2, 12(a2) +; RV32ZBB-NEXT: lw t1, 8(a2) +; RV32ZBB-NEXT: lw a4, 8(a1) +; RV32ZBB-NEXT: lw a6, 12(a1) +; RV32ZBB-NEXT: lw a7, 4(a2) +; RV32ZBB-NEXT: lw t0, 4(a1) +; RV32ZBB-NEXT: sltu a1, a4, t1 +; RV32ZBB-NEXT: sub a2, a6, t2 +; RV32ZBB-NEXT: sltu t2, a3, a5 +; RV32ZBB-NEXT: sub a1, a2, a1 +; RV32ZBB-NEXT: mv a2, t2 +; RV32ZBB-NEXT: beq t0, a7, .LBB12_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: sltu a2, t0, a7 ; RV32ZBB-NEXT: .LBB12_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 -; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB12_4 +; RV32ZBB-NEXT: sub t1, a4, t1 +; RV32ZBB-NEXT: sltu t3, t1, a2 +; RV32ZBB-NEXT: sub a1, a1, t3 +; RV32ZBB-NEXT: sub a2, t1, a2 +; RV32ZBB-NEXT: beq a1, a6, .LBB12_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: sltu t1, a6, a1 +; RV32ZBB-NEXT: j .LBB12_5 ; RV32ZBB-NEXT: .LBB12_4: -; RV32ZBB-NEXT: xor t5, t0, a2 -; RV32ZBB-NEXT: xor t6, a7, a6 -; RV32ZBB-NEXT: or t6, t6, t5 -; RV32ZBB-NEXT: mv t5, t3 -; RV32ZBB-NEXT: beqz t6, .LBB12_6 -; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv t5, t4 -; RV32ZBB-NEXT: .LBB12_6: -; RV32ZBB-NEXT: neg t4, t5 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 -; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 -; RV32ZBB-NEXT: sub a2, a2, a7 -; 
RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 -; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB12_8 -; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: sub a6, a7, a3 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 +; RV32ZBB-NEXT: sltu t1, a4, a2 +; RV32ZBB-NEXT: .LBB12_5: +; RV32ZBB-NEXT: sub a7, t0, a7 +; RV32ZBB-NEXT: sub a7, a7, t2 +; RV32ZBB-NEXT: sub a5, a3, a5 +; RV32ZBB-NEXT: beq a7, t0, .LBB12_7 +; RV32ZBB-NEXT: # %bb.6: +; RV32ZBB-NEXT: sltu a3, t0, a7 +; RV32ZBB-NEXT: j .LBB12_8 +; RV32ZBB-NEXT: .LBB12_7: +; RV32ZBB-NEXT: sltu a3, a3, a5 ; RV32ZBB-NEXT: .LBB12_8: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: sub a3, a7, a3 -; RV32ZBB-NEXT: add a1, a1, a5 +; RV32ZBB-NEXT: xor a6, a1, a6 +; RV32ZBB-NEXT: xor a4, a2, a4 +; RV32ZBB-NEXT: or a4, a4, a6 +; RV32ZBB-NEXT: beqz a4, .LBB12_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: mv a3, t1 +; RV32ZBB-NEXT: .LBB12_10: +; RV32ZBB-NEXT: neg t0, a3 +; RV32ZBB-NEXT: xor a5, a5, t0 +; RV32ZBB-NEXT: sltu t2, a5, t0 +; RV32ZBB-NEXT: xor t3, a7, t0 +; RV32ZBB-NEXT: add a4, t3, a3 +; RV32ZBB-NEXT: sub a4, a4, t2 +; RV32ZBB-NEXT: snez t1, a4 +; RV32ZBB-NEXT: add a5, a5, a3 +; RV32ZBB-NEXT: snez a6, a5 +; RV32ZBB-NEXT: or t1, a6, t1 +; RV32ZBB-NEXT: beqz a7, .LBB12_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sltu t2, t3, t0 +; RV32ZBB-NEXT: .LBB12_12: +; RV32ZBB-NEXT: xor a2, a2, t0 +; RV32ZBB-NEXT: add a7, a2, a3 +; RV32ZBB-NEXT: sub t3, a7, t2 +; RV32ZBB-NEXT: neg t4, t3 +; RV32ZBB-NEXT: sltu t5, t4, t1 +; RV32ZBB-NEXT: sltu a2, a2, t0 +; RV32ZBB-NEXT: xor a1, a1, t0 +; RV32ZBB-NEXT: add a1, a1, a3 +; RV32ZBB-NEXT: sub a1, a1, a2 +; RV32ZBB-NEXT: sltu a2, a7, t2 +; RV32ZBB-NEXT: sub a1, a1, a2 +; RV32ZBB-NEXT: snez a2, t3 +; RV32ZBB-NEXT: add a1, a1, a2 ; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a4, a4 +; RV32ZBB-NEXT: sub a1, a1, t5 +; RV32ZBB-NEXT: sub a2, t4, t1 +; RV32ZBB-NEXT: add a4, a4, a6 +; RV32ZBB-NEXT: neg a3, a4 +; RV32ZBB-NEXT: neg a4, a5 ; RV32ZBB-NEXT: sw a4, 0(a0) -; RV32ZBB-NEXT: sw a1, 4(a0) -; RV32ZBB-NEXT: sw a3, 8(a0) -; RV32ZBB-NEXT: sw a2, 12(a0) +; RV32ZBB-NEXT: sw a3, 4(a0) +; RV32ZBB-NEXT: sw a2, 8(a0) +; RV32ZBB-NEXT: sw a1, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128_undef: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: mv a5, a4 -; RV64ZBB-NEXT: beq a1, a3, .LBB12_2 +; RV64ZBB-NEXT: sub a3, a1, a3 +; RV64ZBB-NEXT: sub a3, a3, a4 +; RV64ZBB-NEXT: sub a2, a0, a2 +; RV64ZBB-NEXT: beq a3, a1, .LBB12_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a5, a1, a3 +; RV64ZBB-NEXT: sltu a0, a1, a3 +; RV64ZBB-NEXT: j .LBB12_3 ; RV64ZBB-NEXT: .LBB12_2: -; RV64ZBB-NEXT: neg a5, a5 -; RV64ZBB-NEXT: sub a1, a1, a3 +; RV64ZBB-NEXT: sltu a0, a0, a2 +; RV64ZBB-NEXT: .LBB12_3: +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: xor a2, a2, a1 +; RV64ZBB-NEXT: sltu a4, a2, a1 +; RV64ZBB-NEXT: xor a1, a3, a1 +; RV64ZBB-NEXT: add a1, a1, a0 ; RV64ZBB-NEXT: sub a1, a1, a4 -; RV64ZBB-NEXT: sub a0, a0, a2 -; 
RV64ZBB-NEXT: bgez a5, .LBB12_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB12_4: +; RV64ZBB-NEXT: add a0, a2, a0 ; RV64ZBB-NEXT: snez a2, a0 ; RV64ZBB-NEXT: add a1, a1, a2 ; RV64ZBB-NEXT: neg a1, a1 diff --git a/llvm/test/CodeGen/RISCV/abdu.ll b/llvm/test/CodeGen/RISCV/abdu.ll index d4b87366bab6e2..0730b9b350863e 100644 --- a/llvm/test/CodeGen/RISCV/abdu.ll +++ b/llvm/test/CodeGen/RISCV/abdu.ll @@ -11,8 +11,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: andi a1, a1, 255 +; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -21,8 +21,8 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i8: ; RV64I: # %bb.0: -; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: andi a1, a1, 255 +; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -31,11 +31,11 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i8: ; ZBB: # %bb.0: -; ZBB-NEXT: andi a0, a0, 255 ; ZBB-NEXT: andi a1, a1, 255 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: andi a0, a0, 255 +; ZBB-NEXT: minu a2, a0, a1 +; ZBB-NEXT: maxu a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 @@ -48,9 +48,9 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind { define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_i16: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srli a1, a1, 16 +; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -59,9 +59,9 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i8_i16: ; RV64I: # %bb.0: -; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: slli a1, a1, 48 ; RV64I-NEXT: srli a1, a1, 48 +; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -70,11 +70,11 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i8_i16: ; ZBB: # %bb.0: -; ZBB-NEXT: andi a0, a0, 255 ; ZBB-NEXT: zext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: andi a0, a0, 255 +; ZBB-NEXT: minu a2, a0, a1 +; ZBB-NEXT: maxu a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i16 %b to i64 @@ -87,8 +87,8 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind { define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; RV32I-LABEL: abd_ext_i8_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: andi a1, a1, 255 +; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -97,8 +97,8 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; ; RV64I-LABEL: abd_ext_i8_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: andi a1, a1, 255 +; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -107,11 +107,11 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i8_undef: ; ZBB: # %bb.0: -; ZBB-NEXT: andi a0, a0, 255 ; ZBB-NEXT: andi a1, a1, 255 -; ZBB-NEXT: sub 
a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: andi a0, a0, 255 +; ZBB-NEXT: minu a2, a0, a1 +; ZBB-NEXT: maxu a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = zext i8 %a to i64 %bext = zext i8 %b to i64 @@ -126,8 +126,8 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 16 ; RV32I-NEXT: addi a2, a2, -1 -; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -138,8 +138,8 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; RV64I: # %bb.0: ; RV64I-NEXT: lui a2, 16 ; RV64I-NEXT: addiw a2, a2, -1 -; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -148,11 +148,11 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i16: ; ZBB: # %bb.0: -; ZBB-NEXT: zext.h a0, a0 ; ZBB-NEXT: zext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: zext.h a0, a0 +; ZBB-NEXT: minu a2, a0, a1 +; ZBB-NEXT: maxu a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 @@ -167,21 +167,20 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 16 ; RV32I-NEXT: srli a0, a0, 16 -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB4_2 +; RV32I-NEXT: bltu a1, a0, .LBB4_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i16_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 48 -; RV64I-NEXT: srli a0, a0, 48 ; RV64I-NEXT: slli a1, a1, 32 ; RV64I-NEXT: srli a1, a1, 32 +; RV64I-NEXT: slli a0, a0, 48 +; RV64I-NEXT: srli a0, a0, 48 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -191,23 +190,19 @@ define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind { ; RV32ZBB-LABEL: abd_ext_i16_i32: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: zext.h a0, a0 -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB4_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB4_2: +; RV32ZBB-NEXT: minu a2, a0, a1 +; RV32ZBB-NEXT: maxu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i16_i32: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: zext.h a0, a0 ; RV64ZBB-NEXT: slli a1, a1, 32 ; RV64ZBB-NEXT: srli a1, a1, 32 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: zext.h a0, a0 +; RV64ZBB-NEXT: minu a2, a0, a1 +; RV64ZBB-NEXT: maxu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i32 %b to i64 @@ -222,8 +217,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 16 ; RV32I-NEXT: addi a2, a2, -1 -; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: xor a0, a0, a1 @@ -234,8 +229,8 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; RV64I: # %bb.0: ; RV64I-NEXT: lui a2, 16 ; RV64I-NEXT: addiw a2, a2, -1 -; RV64I-NEXT: and 
a0, a0, a2 ; RV64I-NEXT: and a1, a1, a2 +; RV64I-NEXT: and a0, a0, a2 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -244,11 +239,11 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { ; ; ZBB-LABEL: abd_ext_i16_undef: ; ZBB: # %bb.0: -; ZBB-NEXT: zext.h a0, a0 ; ZBB-NEXT: zext.h a1, a1 -; ZBB-NEXT: sub a0, a0, a1 -; ZBB-NEXT: neg a1, a0 -; ZBB-NEXT: max a0, a0, a1 +; ZBB-NEXT: zext.h a0, a0 +; ZBB-NEXT: minu a2, a0, a1 +; ZBB-NEXT: maxu a0, a0, a1 +; ZBB-NEXT: sub a0, a0, a2 ; ZBB-NEXT: ret %aext = zext i16 %a to i64 %bext = zext i16 %b to i64 @@ -261,21 +256,20 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind { define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB6_2 +; RV32I-NEXT: bltu a1, a0, .LBB6_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB6_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 32 -; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: slli a1, a1, 32 ; RV64I-NEXT: srli a1, a1, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -284,24 +278,20 @@ define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB6_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB6_2: +; RV32ZBB-NEXT: minu a2, a0, a1 +; RV32ZBB-NEXT: maxu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: slli a0, a0, 32 -; RV64ZBB-NEXT: srli a0, a0, 32 ; RV64ZBB-NEXT: slli a1, a1, 32 ; RV64ZBB-NEXT: srli a1, a1, 32 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: slli a0, a0, 32 +; RV64ZBB-NEXT: srli a0, a0, 32 +; RV64ZBB-NEXT: minu a2, a0, a1 +; RV64ZBB-NEXT: maxu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -316,13 +306,12 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a1, 16 ; RV32I-NEXT: srli a1, a1, 16 -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB7_2 +; RV32I-NEXT: bltu a1, a0, .LBB7_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB7_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i32_i16: @@ -340,13 +329,9 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; RV32ZBB-LABEL: abd_ext_i32_i16: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: zext.h a1, a1 -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB7_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB7_2: +; RV32ZBB-NEXT: minu a2, a0, a1 +; RV32ZBB-NEXT: maxu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_i16: @@ -354,9 +339,9 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { ; RV64ZBB-NEXT: slli a0, a0, 32 ; RV64ZBB-NEXT: srli a0, a0, 
32 ; RV64ZBB-NEXT: zext.h a1, a1 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: minu a2, a0, a1 +; RV64ZBB-NEXT: maxu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i16 %b to i64 @@ -369,21 +354,20 @@ define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind { define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_ext_i32_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: sltu a2, a0, a1 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a0, a0, a1 -; RV32I-NEXT: bgez a2, .LBB8_2 +; RV32I-NEXT: bltu a1, a0, .LBB8_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: sub a0, a1, a0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB8_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i32_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 32 -; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: slli a1, a1, 32 ; RV64I-NEXT: srli a1, a1, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: srai a1, a0, 63 ; RV64I-NEXT: xor a0, a0, a1 @@ -392,24 +376,20 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { ; ; RV32ZBB-LABEL: abd_ext_i32_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: sltu a2, a0, a1 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a0, a0, a1 -; RV32ZBB-NEXT: bgez a2, .LBB8_2 -; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB8_2: +; RV32ZBB-NEXT: minu a2, a0, a1 +; RV32ZBB-NEXT: maxu a0, a0, a1 +; RV32ZBB-NEXT: sub a0, a0, a2 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i32_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: slli a0, a0, 32 -; RV64ZBB-NEXT: srli a0, a0, 32 ; RV64ZBB-NEXT: slli a1, a1, 32 ; RV64ZBB-NEXT: srli a1, a1, 32 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: neg a1, a0 -; RV64ZBB-NEXT: max a0, a0, a1 +; RV64ZBB-NEXT: slli a0, a0, 32 +; RV64ZBB-NEXT: srli a0, a0, 32 +; RV64ZBB-NEXT: minu a2, a0, a1 +; RV64ZBB-NEXT: maxu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i32 %a to i64 %bext = zext i32 %b to i64 @@ -423,65 +403,62 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: mv a5, a4 -; RV32I-NEXT: beq a1, a3, .LBB9_2 +; RV32I-NEXT: sub a3, a1, a3 +; RV32I-NEXT: sub a3, a3, a4 +; RV32I-NEXT: sub a2, a0, a2 +; RV32I-NEXT: beq a3, a1, .LBB9_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu a5, a1, a3 +; RV32I-NEXT: sltu a0, a1, a3 +; RV32I-NEXT: j .LBB9_3 ; RV32I-NEXT: .LBB9_2: -; RV32I-NEXT: neg a5, a5 -; RV32I-NEXT: sub a1, a1, a3 +; RV32I-NEXT: sltu a0, a0, a2 +; RV32I-NEXT: .LBB9_3: +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: xor a2, a2, a1 +; RV32I-NEXT: sltu a4, a2, a1 +; RV32I-NEXT: xor a1, a3, a1 +; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: sub a1, a1, a4 -; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB9_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: .LBB9_4: +; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: sltu a2, a0, a1 -; RV64I-NEXT: neg a2, a2 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB9_2 +; RV64I-NEXT: bltu a1, a0, .LBB9_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB9_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64: ; RV32ZBB: # 
%bb.0: ; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: mv a5, a4 -; RV32ZBB-NEXT: beq a1, a3, .LBB9_2 +; RV32ZBB-NEXT: sub a3, a1, a3 +; RV32ZBB-NEXT: sub a3, a3, a4 +; RV32ZBB-NEXT: sub a2, a0, a2 +; RV32ZBB-NEXT: beq a3, a1, .LBB9_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a5, a1, a3 +; RV32ZBB-NEXT: sltu a0, a1, a3 +; RV32ZBB-NEXT: j .LBB9_3 ; RV32ZBB-NEXT: .LBB9_2: -; RV32ZBB-NEXT: neg a5, a5 -; RV32ZBB-NEXT: sub a1, a1, a3 +; RV32ZBB-NEXT: sltu a0, a0, a2 +; RV32ZBB-NEXT: .LBB9_3: +; RV32ZBB-NEXT: neg a1, a0 +; RV32ZBB-NEXT: xor a2, a2, a1 +; RV32ZBB-NEXT: sltu a4, a2, a1 +; RV32ZBB-NEXT: xor a1, a3, a1 +; RV32ZBB-NEXT: add a1, a1, a0 ; RV32ZBB-NEXT: sub a1, a1, a4 -; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB9_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB9_4: +; RV32ZBB-NEXT: add a0, a2, a0 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i64: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sltu a2, a0, a1 -; RV64ZBB-NEXT: neg a2, a2 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB9_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB9_2: +; RV64ZBB-NEXT: minu a2, a0, a1 +; RV64ZBB-NEXT: maxu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -495,65 +472,62 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_ext_i64_undef: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a4, a0, a2 -; RV32I-NEXT: mv a5, a4 -; RV32I-NEXT: beq a1, a3, .LBB10_2 +; RV32I-NEXT: sub a3, a1, a3 +; RV32I-NEXT: sub a3, a3, a4 +; RV32I-NEXT: sub a2, a0, a2 +; RV32I-NEXT: beq a3, a1, .LBB10_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu a5, a1, a3 +; RV32I-NEXT: sltu a0, a1, a3 +; RV32I-NEXT: j .LBB10_3 ; RV32I-NEXT: .LBB10_2: -; RV32I-NEXT: neg a5, a5 -; RV32I-NEXT: sub a1, a1, a3 +; RV32I-NEXT: sltu a0, a0, a2 +; RV32I-NEXT: .LBB10_3: +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: xor a2, a2, a1 +; RV32I-NEXT: sltu a4, a2, a1 +; RV32I-NEXT: xor a1, a3, a1 +; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: sub a1, a1, a4 -; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: bgez a5, .LBB10_4 -; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: snez a2, a0 -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: neg a1, a1 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: .LBB10_4: +; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i64_undef: ; RV64I: # %bb.0: -; RV64I-NEXT: sltu a2, a0, a1 -; RV64I-NEXT: neg a2, a2 -; RV64I-NEXT: sub a0, a0, a1 -; RV64I-NEXT: bgez a2, .LBB10_2 +; RV64I-NEXT: bltu a1, a0, .LBB10_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: sub a0, a1, a0 +; RV64I-NEXT: ret ; RV64I-NEXT: .LBB10_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i64_undef: ; RV32ZBB: # %bb.0: ; RV32ZBB-NEXT: sltu a4, a0, a2 -; RV32ZBB-NEXT: mv a5, a4 -; RV32ZBB-NEXT: beq a1, a3, .LBB10_2 +; RV32ZBB-NEXT: sub a3, a1, a3 +; RV32ZBB-NEXT: sub a3, a3, a4 +; RV32ZBB-NEXT: sub a2, a0, a2 +; RV32ZBB-NEXT: beq a3, a1, .LBB10_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a5, a1, a3 +; RV32ZBB-NEXT: sltu a0, a1, a3 +; RV32ZBB-NEXT: j .LBB10_3 ; RV32ZBB-NEXT: .LBB10_2: -; RV32ZBB-NEXT: neg a5, a5 -; RV32ZBB-NEXT: sub a1, a1, a3 +; RV32ZBB-NEXT: sltu a0, a0, a2 +; RV32ZBB-NEXT: .LBB10_3: +; RV32ZBB-NEXT: neg a1, a0 +; RV32ZBB-NEXT: xor a2, a2, a1 +; RV32ZBB-NEXT: sltu a4, a2, a1 +; RV32ZBB-NEXT: xor a1, a3, a1 +; RV32ZBB-NEXT: add a1, a1, a0 ; RV32ZBB-NEXT: 
sub a1, a1, a4 -; RV32ZBB-NEXT: sub a0, a0, a2 -; RV32ZBB-NEXT: bgez a5, .LBB10_4 -; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: snez a2, a0 -; RV32ZBB-NEXT: add a1, a1, a2 -; RV32ZBB-NEXT: neg a1, a1 -; RV32ZBB-NEXT: neg a0, a0 -; RV32ZBB-NEXT: .LBB10_4: +; RV32ZBB-NEXT: add a0, a2, a0 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i64_undef: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sltu a2, a0, a1 -; RV64ZBB-NEXT: neg a2, a2 -; RV64ZBB-NEXT: sub a0, a0, a1 -; RV64ZBB-NEXT: bgez a2, .LBB10_2 -; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB10_2: +; RV64ZBB-NEXT: minu a2, a0, a1 +; RV64ZBB-NEXT: maxu a0, a0, a1 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %aext = zext i64 %a to i128 %bext = zext i64 %b to i128 @@ -566,168 +540,194 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) -; RV32I-NEXT: lw t0, 12(a1) +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw t1, 12(a2) +; RV32I-NEXT: lw a7, 8(a2) +; RV32I-NEXT: lw a4, 8(a1) +; RV32I-NEXT: lw a6, 12(a1) +; RV32I-NEXT: lw t0, 4(a2) ; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB11_2 +; RV32I-NEXT: sltu a2, a4, a7 +; RV32I-NEXT: sub t1, a6, t1 +; RV32I-NEXT: sltu t2, a3, a5 +; RV32I-NEXT: sub a2, t1, a2 +; RV32I-NEXT: mv t1, t2 +; RV32I-NEXT: beq a1, t0, .LBB11_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: sltu t1, a1, t0 ; RV32I-NEXT: .LBB11_2: -; RV32I-NEXT: sltu t2, a4, a3 -; RV32I-NEXT: mv t3, t2 -; RV32I-NEXT: beq a1, a5, .LBB11_4 +; RV32I-NEXT: sub a7, a4, a7 +; RV32I-NEXT: sltu t3, a7, t1 +; RV32I-NEXT: sub a2, a2, t3 +; RV32I-NEXT: sub a7, a7, t1 +; RV32I-NEXT: beq a2, a6, .LBB11_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: sltu t1, a6, a2 +; RV32I-NEXT: j .LBB11_5 ; RV32I-NEXT: .LBB11_4: -; RV32I-NEXT: xor t5, t0, a2 -; RV32I-NEXT: xor t6, a7, a6 -; RV32I-NEXT: or t6, t6, t5 -; RV32I-NEXT: mv t5, t3 -; RV32I-NEXT: beqz t6, .LBB11_6 -; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv t5, t4 -; RV32I-NEXT: .LBB11_6: -; RV32I-NEXT: neg t4, t5 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 -; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB11_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: sub a6, a7, a3 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: sltu t1, a4, a7 +; RV32I-NEXT: .LBB11_5: +; RV32I-NEXT: sub t0, a1, t0 +; RV32I-NEXT: sub t0, t0, t2 +; RV32I-NEXT: sub a5, a3, a5 +; RV32I-NEXT: beq t0, a1, .LBB11_7 +; RV32I-NEXT: # %bb.6: +; RV32I-NEXT: sltu a1, a1, t0 +; RV32I-NEXT: j .LBB11_8 +; RV32I-NEXT: .LBB11_7: +; RV32I-NEXT: sltu a1, a3, a5 ; RV32I-NEXT: .LBB11_8: -; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw a4, 0(a0) -; RV32I-NEXT: sw a6, 8(a0) -; RV32I-NEXT: sw a2, 12(a0) +; RV32I-NEXT: xor a3, a2, a6 +; 
RV32I-NEXT: xor a4, a7, a4 +; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: beqz a3, .LBB11_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: mv a1, t1 +; RV32I-NEXT: .LBB11_10: +; RV32I-NEXT: neg a6, a1 +; RV32I-NEXT: xor a3, a7, a6 +; RV32I-NEXT: sltu a4, a3, a6 +; RV32I-NEXT: xor a2, a2, a6 +; RV32I-NEXT: add a2, a2, a1 +; RV32I-NEXT: sub a4, a2, a4 +; RV32I-NEXT: xor a2, a5, a6 +; RV32I-NEXT: sltu a5, a2, a6 +; RV32I-NEXT: xor a7, t0, a6 +; RV32I-NEXT: mv t1, a5 +; RV32I-NEXT: beqz t0, .LBB11_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sltu t1, a7, a6 +; RV32I-NEXT: .LBB11_12: +; RV32I-NEXT: add a3, a3, a1 +; RV32I-NEXT: sltu a6, a3, t1 +; RV32I-NEXT: sub a4, a4, a6 +; RV32I-NEXT: sub a3, a3, t1 +; RV32I-NEXT: add a7, a7, a1 +; RV32I-NEXT: sub a5, a7, a5 +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: sw a1, 0(a0) +; RV32I-NEXT: sw a5, 4(a0) +; RV32I-NEXT: sw a3, 8(a0) +; RV32I-NEXT: sw a4, 12(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a4, a0, a2 -; RV64I-NEXT: mv a5, a4 -; RV64I-NEXT: beq a1, a3, .LBB11_2 +; RV64I-NEXT: sub a3, a1, a3 +; RV64I-NEXT: sub a3, a3, a4 +; RV64I-NEXT: sub a2, a0, a2 +; RV64I-NEXT: beq a3, a1, .LBB11_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a5, a1, a3 +; RV64I-NEXT: sltu a0, a1, a3 +; RV64I-NEXT: j .LBB11_3 ; RV64I-NEXT: .LBB11_2: -; RV64I-NEXT: neg a5, a5 -; RV64I-NEXT: sub a1, a1, a3 +; RV64I-NEXT: sltu a0, a0, a2 +; RV64I-NEXT: .LBB11_3: +; RV64I-NEXT: neg a1, a0 +; RV64I-NEXT: xor a2, a2, a1 +; RV64I-NEXT: sltu a4, a2, a1 +; RV64I-NEXT: xor a1, a3, a1 +; RV64I-NEXT: add a1, a1, a0 ; RV64I-NEXT: sub a1, a1, a4 -; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB11_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: .LBB11_4: +; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) -; RV32ZBB-NEXT: lw t0, 12(a1) +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw t1, 12(a2) +; RV32ZBB-NEXT: lw a7, 8(a2) +; RV32ZBB-NEXT: lw a4, 8(a1) +; RV32ZBB-NEXT: lw a6, 12(a1) +; RV32ZBB-NEXT: lw t0, 4(a2) ; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB11_2 +; RV32ZBB-NEXT: sltu a2, a4, a7 +; RV32ZBB-NEXT: sub t1, a6, t1 +; RV32ZBB-NEXT: sltu t2, a3, a5 +; RV32ZBB-NEXT: sub a2, t1, a2 +; RV32ZBB-NEXT: mv t1, t2 +; RV32ZBB-NEXT: beq a1, t0, .LBB11_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: sltu t1, a1, t0 ; RV32ZBB-NEXT: .LBB11_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 -; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB11_4 +; RV32ZBB-NEXT: sub a7, a4, a7 +; RV32ZBB-NEXT: sltu t3, a7, t1 +; RV32ZBB-NEXT: sub a2, a2, t3 +; RV32ZBB-NEXT: sub a7, a7, t1 +; RV32ZBB-NEXT: beq a2, a6, .LBB11_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: sltu t1, a6, a2 +; RV32ZBB-NEXT: j .LBB11_5 ; RV32ZBB-NEXT: .LBB11_4: -; RV32ZBB-NEXT: xor t5, t0, a2 -; RV32ZBB-NEXT: xor t6, a7, a6 -; RV32ZBB-NEXT: or t6, t6, t5 -; RV32ZBB-NEXT: mv t5, t3 -; RV32ZBB-NEXT: beqz t6, .LBB11_6 -; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv t5, t4 -; RV32ZBB-NEXT: .LBB11_6: -; RV32ZBB-NEXT: neg t4, t5 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 -; RV32ZBB-NEXT: sub a6, 
a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 -; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 -; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB11_8 -; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: sub a6, a7, a3 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 +; RV32ZBB-NEXT: sltu t1, a4, a7 +; RV32ZBB-NEXT: .LBB11_5: +; RV32ZBB-NEXT: sub t0, a1, t0 +; RV32ZBB-NEXT: sub t0, t0, t2 +; RV32ZBB-NEXT: sub a5, a3, a5 +; RV32ZBB-NEXT: beq t0, a1, .LBB11_7 +; RV32ZBB-NEXT: # %bb.6: +; RV32ZBB-NEXT: sltu a1, a1, t0 +; RV32ZBB-NEXT: j .LBB11_8 +; RV32ZBB-NEXT: .LBB11_7: +; RV32ZBB-NEXT: sltu a1, a3, a5 ; RV32ZBB-NEXT: .LBB11_8: -; RV32ZBB-NEXT: sw a1, 4(a0) -; RV32ZBB-NEXT: sw a4, 0(a0) -; RV32ZBB-NEXT: sw a6, 8(a0) -; RV32ZBB-NEXT: sw a2, 12(a0) +; RV32ZBB-NEXT: xor a3, a2, a6 +; RV32ZBB-NEXT: xor a4, a7, a4 +; RV32ZBB-NEXT: or a3, a4, a3 +; RV32ZBB-NEXT: beqz a3, .LBB11_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: mv a1, t1 +; RV32ZBB-NEXT: .LBB11_10: +; RV32ZBB-NEXT: neg a6, a1 +; RV32ZBB-NEXT: xor a3, a7, a6 +; RV32ZBB-NEXT: sltu a4, a3, a6 +; RV32ZBB-NEXT: xor a2, a2, a6 +; RV32ZBB-NEXT: add a2, a2, a1 +; RV32ZBB-NEXT: sub a4, a2, a4 +; RV32ZBB-NEXT: xor a2, a5, a6 +; RV32ZBB-NEXT: sltu a5, a2, a6 +; RV32ZBB-NEXT: xor a7, t0, a6 +; RV32ZBB-NEXT: mv t1, a5 +; RV32ZBB-NEXT: beqz t0, .LBB11_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sltu t1, a7, a6 +; RV32ZBB-NEXT: .LBB11_12: +; RV32ZBB-NEXT: add a3, a3, a1 +; RV32ZBB-NEXT: sltu a6, a3, t1 +; RV32ZBB-NEXT: sub a4, a4, a6 +; RV32ZBB-NEXT: sub a3, a3, t1 +; RV32ZBB-NEXT: add a7, a7, a1 +; RV32ZBB-NEXT: sub a5, a7, a5 +; RV32ZBB-NEXT: add a1, a2, a1 +; RV32ZBB-NEXT: sw a1, 0(a0) +; RV32ZBB-NEXT: sw a5, 4(a0) +; RV32ZBB-NEXT: sw a3, 8(a0) +; RV32ZBB-NEXT: sw a4, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: mv a5, a4 -; RV64ZBB-NEXT: beq a1, a3, .LBB11_2 +; RV64ZBB-NEXT: sub a3, a1, a3 +; RV64ZBB-NEXT: sub a3, a3, a4 +; RV64ZBB-NEXT: sub a2, a0, a2 +; RV64ZBB-NEXT: beq a3, a1, .LBB11_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a5, a1, a3 +; RV64ZBB-NEXT: sltu a0, a1, a3 +; RV64ZBB-NEXT: j .LBB11_3 ; RV64ZBB-NEXT: .LBB11_2: -; RV64ZBB-NEXT: neg a5, a5 -; RV64ZBB-NEXT: sub a1, a1, a3 +; RV64ZBB-NEXT: sltu a0, a0, a2 +; RV64ZBB-NEXT: .LBB11_3: +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: xor a2, a2, a1 +; RV64ZBB-NEXT: sltu a4, a2, a1 +; RV64ZBB-NEXT: xor a1, a3, a1 +; RV64ZBB-NEXT: add a1, a1, a0 ; RV64ZBB-NEXT: sub a1, a1, a4 -; RV64ZBB-NEXT: sub a0, a0, a2 -; RV64ZBB-NEXT: bgez a5, .LBB11_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB11_4: +; RV64ZBB-NEXT: add a0, a2, a0 ; RV64ZBB-NEXT: ret %aext = zext i128 %a to i256 %bext = zext i128 %b to i256 @@ -740,168 +740,194 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_ext_i128_undef: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a3, 0(a2) -; RV32I-NEXT: lw a4, 0(a1) -; RV32I-NEXT: lw a5, 4(a2) -; RV32I-NEXT: lw a6, 8(a2) -; 
RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a2, 12(a2) -; RV32I-NEXT: lw t0, 12(a1) +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw t1, 12(a2) +; RV32I-NEXT: lw a7, 8(a2) +; RV32I-NEXT: lw a4, 8(a1) +; RV32I-NEXT: lw a6, 12(a1) +; RV32I-NEXT: lw t0, 4(a2) ; RV32I-NEXT: lw a1, 4(a1) -; RV32I-NEXT: sltu t1, a7, a6 -; RV32I-NEXT: mv t4, t1 -; RV32I-NEXT: beq t0, a2, .LBB12_2 +; RV32I-NEXT: sltu a2, a4, a7 +; RV32I-NEXT: sub t1, a6, t1 +; RV32I-NEXT: sltu t2, a3, a5 +; RV32I-NEXT: sub a2, t1, a2 +; RV32I-NEXT: mv t1, t2 +; RV32I-NEXT: beq a1, t0, .LBB12_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t4, t0, a2 +; RV32I-NEXT: sltu t1, a1, t0 ; RV32I-NEXT: .LBB12_2: -; RV32I-NEXT: sltu t2, a4, a3 -; RV32I-NEXT: mv t3, t2 -; RV32I-NEXT: beq a1, a5, .LBB12_4 +; RV32I-NEXT: sub a7, a4, a7 +; RV32I-NEXT: sltu t3, a7, t1 +; RV32I-NEXT: sub a2, a2, t3 +; RV32I-NEXT: sub a7, a7, t1 +; RV32I-NEXT: beq a2, a6, .LBB12_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: sltu t3, a1, a5 +; RV32I-NEXT: sltu t1, a6, a2 +; RV32I-NEXT: j .LBB12_5 ; RV32I-NEXT: .LBB12_4: -; RV32I-NEXT: xor t5, t0, a2 -; RV32I-NEXT: xor t6, a7, a6 -; RV32I-NEXT: or t6, t6, t5 -; RV32I-NEXT: mv t5, t3 -; RV32I-NEXT: beqz t6, .LBB12_6 -; RV32I-NEXT: # %bb.5: -; RV32I-NEXT: mv t5, t4 -; RV32I-NEXT: .LBB12_6: -; RV32I-NEXT: neg t4, t5 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a2, a2, t1 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: sltu a7, a6, t3 -; RV32I-NEXT: sub a2, a2, a7 -; RV32I-NEXT: sub a6, a6, t3 -; RV32I-NEXT: sub a1, a1, a5 -; RV32I-NEXT: sub a1, a1, t2 -; RV32I-NEXT: sub a4, a4, a3 -; RV32I-NEXT: bgez t4, .LBB12_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: snez a3, a1 -; RV32I-NEXT: snez a5, a4 -; RV32I-NEXT: or a3, a5, a3 -; RV32I-NEXT: neg a7, a6 -; RV32I-NEXT: sltu t0, a7, a3 -; RV32I-NEXT: snez a6, a6 -; RV32I-NEXT: add a2, a2, a6 -; RV32I-NEXT: neg a2, a2 -; RV32I-NEXT: sub a2, a2, t0 -; RV32I-NEXT: sub a6, a7, a3 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: add a1, a1, a5 -; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: sltu t1, a4, a7 +; RV32I-NEXT: .LBB12_5: +; RV32I-NEXT: sub t0, a1, t0 +; RV32I-NEXT: sub t0, t0, t2 +; RV32I-NEXT: sub a5, a3, a5 +; RV32I-NEXT: beq t0, a1, .LBB12_7 +; RV32I-NEXT: # %bb.6: +; RV32I-NEXT: sltu a1, a1, t0 +; RV32I-NEXT: j .LBB12_8 +; RV32I-NEXT: .LBB12_7: +; RV32I-NEXT: sltu a1, a3, a5 ; RV32I-NEXT: .LBB12_8: -; RV32I-NEXT: sw a1, 4(a0) -; RV32I-NEXT: sw a4, 0(a0) -; RV32I-NEXT: sw a6, 8(a0) -; RV32I-NEXT: sw a2, 12(a0) +; RV32I-NEXT: xor a3, a2, a6 +; RV32I-NEXT: xor a4, a7, a4 +; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: beqz a3, .LBB12_10 +; RV32I-NEXT: # %bb.9: +; RV32I-NEXT: mv a1, t1 +; RV32I-NEXT: .LBB12_10: +; RV32I-NEXT: neg a6, a1 +; RV32I-NEXT: xor a3, a7, a6 +; RV32I-NEXT: sltu a4, a3, a6 +; RV32I-NEXT: xor a2, a2, a6 +; RV32I-NEXT: add a2, a2, a1 +; RV32I-NEXT: sub a4, a2, a4 +; RV32I-NEXT: xor a2, a5, a6 +; RV32I-NEXT: sltu a5, a2, a6 +; RV32I-NEXT: xor a7, t0, a6 +; RV32I-NEXT: mv t1, a5 +; RV32I-NEXT: beqz t0, .LBB12_12 +; RV32I-NEXT: # %bb.11: +; RV32I-NEXT: sltu t1, a7, a6 +; RV32I-NEXT: .LBB12_12: +; RV32I-NEXT: add a3, a3, a1 +; RV32I-NEXT: sltu a6, a3, t1 +; RV32I-NEXT: sub a4, a4, a6 +; RV32I-NEXT: sub a3, a3, t1 +; RV32I-NEXT: add a7, a7, a1 +; RV32I-NEXT: sub a5, a7, a5 +; RV32I-NEXT: add a1, a2, a1 +; RV32I-NEXT: sw a1, 0(a0) +; RV32I-NEXT: sw a5, 4(a0) +; RV32I-NEXT: sw a3, 8(a0) +; RV32I-NEXT: sw a4, 12(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_ext_i128_undef: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a4, a0, a2 -; RV64I-NEXT: mv a5, a4 -; RV64I-NEXT: beq a1, 
a3, .LBB12_2 +; RV64I-NEXT: sub a3, a1, a3 +; RV64I-NEXT: sub a3, a3, a4 +; RV64I-NEXT: sub a2, a0, a2 +; RV64I-NEXT: beq a3, a1, .LBB12_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a5, a1, a3 +; RV64I-NEXT: sltu a0, a1, a3 +; RV64I-NEXT: j .LBB12_3 ; RV64I-NEXT: .LBB12_2: -; RV64I-NEXT: neg a5, a5 -; RV64I-NEXT: sub a1, a1, a3 +; RV64I-NEXT: sltu a0, a0, a2 +; RV64I-NEXT: .LBB12_3: +; RV64I-NEXT: neg a1, a0 +; RV64I-NEXT: xor a2, a2, a1 +; RV64I-NEXT: sltu a4, a2, a1 +; RV64I-NEXT: xor a1, a3, a1 +; RV64I-NEXT: add a1, a1, a0 ; RV64I-NEXT: sub a1, a1, a4 -; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: bgez a5, .LBB12_4 -; RV64I-NEXT: # %bb.3: -; RV64I-NEXT: snez a2, a0 -; RV64I-NEXT: add a1, a1, a2 -; RV64I-NEXT: neg a1, a1 -; RV64I-NEXT: neg a0, a0 -; RV64I-NEXT: .LBB12_4: +; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_ext_i128_undef: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a3, 0(a2) -; RV32ZBB-NEXT: lw a4, 0(a1) -; RV32ZBB-NEXT: lw a5, 4(a2) -; RV32ZBB-NEXT: lw a6, 8(a2) -; RV32ZBB-NEXT: lw a7, 8(a1) -; RV32ZBB-NEXT: lw a2, 12(a2) -; RV32ZBB-NEXT: lw t0, 12(a1) +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw t1, 12(a2) +; RV32ZBB-NEXT: lw a7, 8(a2) +; RV32ZBB-NEXT: lw a4, 8(a1) +; RV32ZBB-NEXT: lw a6, 12(a1) +; RV32ZBB-NEXT: lw t0, 4(a2) ; RV32ZBB-NEXT: lw a1, 4(a1) -; RV32ZBB-NEXT: sltu t1, a7, a6 -; RV32ZBB-NEXT: mv t4, t1 -; RV32ZBB-NEXT: beq t0, a2, .LBB12_2 +; RV32ZBB-NEXT: sltu a2, a4, a7 +; RV32ZBB-NEXT: sub t1, a6, t1 +; RV32ZBB-NEXT: sltu t2, a3, a5 +; RV32ZBB-NEXT: sub a2, t1, a2 +; RV32ZBB-NEXT: mv t1, t2 +; RV32ZBB-NEXT: beq a1, t0, .LBB12_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t4, t0, a2 +; RV32ZBB-NEXT: sltu t1, a1, t0 ; RV32ZBB-NEXT: .LBB12_2: -; RV32ZBB-NEXT: sltu t2, a4, a3 -; RV32ZBB-NEXT: mv t3, t2 -; RV32ZBB-NEXT: beq a1, a5, .LBB12_4 +; RV32ZBB-NEXT: sub a7, a4, a7 +; RV32ZBB-NEXT: sltu t3, a7, t1 +; RV32ZBB-NEXT: sub a2, a2, t3 +; RV32ZBB-NEXT: sub a7, a7, t1 +; RV32ZBB-NEXT: beq a2, a6, .LBB12_4 ; RV32ZBB-NEXT: # %bb.3: -; RV32ZBB-NEXT: sltu t3, a1, a5 +; RV32ZBB-NEXT: sltu t1, a6, a2 +; RV32ZBB-NEXT: j .LBB12_5 ; RV32ZBB-NEXT: .LBB12_4: -; RV32ZBB-NEXT: xor t5, t0, a2 -; RV32ZBB-NEXT: xor t6, a7, a6 -; RV32ZBB-NEXT: or t6, t6, t5 -; RV32ZBB-NEXT: mv t5, t3 -; RV32ZBB-NEXT: beqz t6, .LBB12_6 -; RV32ZBB-NEXT: # %bb.5: -; RV32ZBB-NEXT: mv t5, t4 -; RV32ZBB-NEXT: .LBB12_6: -; RV32ZBB-NEXT: neg t4, t5 -; RV32ZBB-NEXT: sub a2, t0, a2 -; RV32ZBB-NEXT: sub a2, a2, t1 -; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: sltu a7, a6, t3 -; RV32ZBB-NEXT: sub a2, a2, a7 -; RV32ZBB-NEXT: sub a6, a6, t3 -; RV32ZBB-NEXT: sub a1, a1, a5 -; RV32ZBB-NEXT: sub a1, a1, t2 -; RV32ZBB-NEXT: sub a4, a4, a3 -; RV32ZBB-NEXT: bgez t4, .LBB12_8 -; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: snez a3, a1 -; RV32ZBB-NEXT: snez a5, a4 -; RV32ZBB-NEXT: or a3, a5, a3 -; RV32ZBB-NEXT: neg a7, a6 -; RV32ZBB-NEXT: sltu t0, a7, a3 -; RV32ZBB-NEXT: snez a6, a6 -; RV32ZBB-NEXT: add a2, a2, a6 -; RV32ZBB-NEXT: neg a2, a2 -; RV32ZBB-NEXT: sub a2, a2, t0 -; RV32ZBB-NEXT: sub a6, a7, a3 -; RV32ZBB-NEXT: neg a4, a4 -; RV32ZBB-NEXT: add a1, a1, a5 -; RV32ZBB-NEXT: neg a1, a1 +; RV32ZBB-NEXT: sltu t1, a4, a7 +; RV32ZBB-NEXT: .LBB12_5: +; RV32ZBB-NEXT: sub t0, a1, t0 +; RV32ZBB-NEXT: sub t0, t0, t2 +; RV32ZBB-NEXT: sub a5, a3, a5 +; RV32ZBB-NEXT: beq t0, a1, .LBB12_7 +; RV32ZBB-NEXT: # %bb.6: +; RV32ZBB-NEXT: sltu a1, a1, t0 +; RV32ZBB-NEXT: j .LBB12_8 +; RV32ZBB-NEXT: .LBB12_7: +; RV32ZBB-NEXT: sltu a1, a3, a5 ; RV32ZBB-NEXT: .LBB12_8: -; RV32ZBB-NEXT: 
sw a1, 4(a0) -; RV32ZBB-NEXT: sw a4, 0(a0) -; RV32ZBB-NEXT: sw a6, 8(a0) -; RV32ZBB-NEXT: sw a2, 12(a0) +; RV32ZBB-NEXT: xor a3, a2, a6 +; RV32ZBB-NEXT: xor a4, a7, a4 +; RV32ZBB-NEXT: or a3, a4, a3 +; RV32ZBB-NEXT: beqz a3, .LBB12_10 +; RV32ZBB-NEXT: # %bb.9: +; RV32ZBB-NEXT: mv a1, t1 +; RV32ZBB-NEXT: .LBB12_10: +; RV32ZBB-NEXT: neg a6, a1 +; RV32ZBB-NEXT: xor a3, a7, a6 +; RV32ZBB-NEXT: sltu a4, a3, a6 +; RV32ZBB-NEXT: xor a2, a2, a6 +; RV32ZBB-NEXT: add a2, a2, a1 +; RV32ZBB-NEXT: sub a4, a2, a4 +; RV32ZBB-NEXT: xor a2, a5, a6 +; RV32ZBB-NEXT: sltu a5, a2, a6 +; RV32ZBB-NEXT: xor a7, t0, a6 +; RV32ZBB-NEXT: mv t1, a5 +; RV32ZBB-NEXT: beqz t0, .LBB12_12 +; RV32ZBB-NEXT: # %bb.11: +; RV32ZBB-NEXT: sltu t1, a7, a6 +; RV32ZBB-NEXT: .LBB12_12: +; RV32ZBB-NEXT: add a3, a3, a1 +; RV32ZBB-NEXT: sltu a6, a3, t1 +; RV32ZBB-NEXT: sub a4, a4, a6 +; RV32ZBB-NEXT: sub a3, a3, t1 +; RV32ZBB-NEXT: add a7, a7, a1 +; RV32ZBB-NEXT: sub a5, a7, a5 +; RV32ZBB-NEXT: add a1, a2, a1 +; RV32ZBB-NEXT: sw a1, 0(a0) +; RV32ZBB-NEXT: sw a5, 4(a0) +; RV32ZBB-NEXT: sw a3, 8(a0) +; RV32ZBB-NEXT: sw a4, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_ext_i128_undef: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: sltu a4, a0, a2 -; RV64ZBB-NEXT: mv a5, a4 -; RV64ZBB-NEXT: beq a1, a3, .LBB12_2 +; RV64ZBB-NEXT: sub a3, a1, a3 +; RV64ZBB-NEXT: sub a3, a3, a4 +; RV64ZBB-NEXT: sub a2, a0, a2 +; RV64ZBB-NEXT: beq a3, a1, .LBB12_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a5, a1, a3 +; RV64ZBB-NEXT: sltu a0, a1, a3 +; RV64ZBB-NEXT: j .LBB12_3 ; RV64ZBB-NEXT: .LBB12_2: -; RV64ZBB-NEXT: neg a5, a5 -; RV64ZBB-NEXT: sub a1, a1, a3 +; RV64ZBB-NEXT: sltu a0, a0, a2 +; RV64ZBB-NEXT: .LBB12_3: +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: xor a2, a2, a1 +; RV64ZBB-NEXT: sltu a4, a2, a1 +; RV64ZBB-NEXT: xor a1, a3, a1 +; RV64ZBB-NEXT: add a1, a1, a0 ; RV64ZBB-NEXT: sub a1, a1, a4 -; RV64ZBB-NEXT: sub a0, a0, a2 -; RV64ZBB-NEXT: bgez a5, .LBB12_4 -; RV64ZBB-NEXT: # %bb.3: -; RV64ZBB-NEXT: snez a2, a0 -; RV64ZBB-NEXT: add a1, a1, a2 -; RV64ZBB-NEXT: neg a1, a1 -; RV64ZBB-NEXT: neg a0, a0 -; RV64ZBB-NEXT: .LBB12_4: +; RV64ZBB-NEXT: add a0, a2, a0 ; RV64ZBB-NEXT: ret %aext = zext i128 %a to i256 %bext = zext i128 %b to i256 @@ -916,23 +942,25 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind { -; NOZBB-LABEL: abd_minmax_i8: -; NOZBB: # %bb.0: -; NOZBB-NEXT: andi a1, a1, 255 -; NOZBB-NEXT: andi a0, a0, 255 -; NOZBB-NEXT: mv a2, a0 -; NOZBB-NEXT: bgeu a0, a1, .LBB13_3 -; NOZBB-NEXT: # %bb.1: -; NOZBB-NEXT: bgeu a1, a0, .LBB13_4 -; NOZBB-NEXT: .LBB13_2: -; NOZBB-NEXT: sub a0, a0, a2 -; NOZBB-NEXT: ret -; NOZBB-NEXT: .LBB13_3: -; NOZBB-NEXT: mv a2, a1 -; NOZBB-NEXT: bltu a1, a0, .LBB13_2 -; NOZBB-NEXT: .LBB13_4: -; NOZBB-NEXT: sub a0, a1, a2 -; NOZBB-NEXT: ret +; RV32I-LABEL: abd_minmax_i8: +; RV32I: # %bb.0: +; RV32I-NEXT: andi a1, a1, 255 +; RV32I-NEXT: andi a0, a0, 255 +; RV32I-NEXT: sub a0, a0, a1 +; RV32I-NEXT: srai a1, a0, 31 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: sub a0, a0, a1 +; RV32I-NEXT: ret +; +; RV64I-LABEL: abd_minmax_i8: +; RV64I: # %bb.0: +; RV64I-NEXT: andi a1, a1, 255 +; RV64I-NEXT: andi a0, a0, 255 +; RV64I-NEXT: sub a0, a0, a1 +; RV64I-NEXT: srai a1, a0, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 +; RV64I-NEXT: ret ; ; ZBB-LABEL: abd_minmax_i8: ; ZBB: # %bb.0: @@ -955,18 +983,10 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; RV32I-NEXT: addi a2, a2, -1 ; RV32I-NEXT: and a1, a1, a2 ; RV32I-NEXT: and a0, a0, a2 -; RV32I-NEXT: mv 
a2, a0 -; RV32I-NEXT: bgeu a0, a1, .LBB14_3 -; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: bgeu a1, a0, .LBB14_4 -; RV32I-NEXT: .LBB14_2: -; RV32I-NEXT: sub a0, a0, a2 -; RV32I-NEXT: ret -; RV32I-NEXT: .LBB14_3: -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bltu a1, a0, .LBB14_2 -; RV32I-NEXT: .LBB14_4: -; RV32I-NEXT: sub a0, a1, a2 +; RV32I-NEXT: sub a0, a0, a1 +; RV32I-NEXT: srai a1, a0, 31 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i16: @@ -975,18 +995,10 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { ; RV64I-NEXT: addiw a2, a2, -1 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: and a0, a0, a2 -; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: bgeu a0, a1, .LBB14_3 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: bgeu a1, a0, .LBB14_4 -; RV64I-NEXT: .LBB14_2: -; RV64I-NEXT: sub a0, a0, a2 -; RV64I-NEXT: ret -; RV64I-NEXT: .LBB14_3: -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu a1, a0, .LBB14_2 -; RV64I-NEXT: .LBB14_4: -; RV64I-NEXT: sub a0, a1, a2 +; RV64I-NEXT: sub a0, a0, a1 +; RV64I-NEXT: srai a1, a0, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; ZBB-LABEL: abd_minmax_i16: @@ -1006,36 +1018,24 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind { define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: abd_minmax_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: bgeu a0, a1, .LBB15_3 +; RV32I-NEXT: bltu a1, a0, .LBB15_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: bgeu a1, a0, .LBB15_4 -; RV32I-NEXT: .LBB15_2: -; RV32I-NEXT: sub a0, a0, a2 +; RV32I-NEXT: sub a0, a1, a0 ; RV32I-NEXT: ret -; RV32I-NEXT: .LBB15_3: -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bltu a1, a0, .LBB15_2 -; RV32I-NEXT: .LBB15_4: -; RV32I-NEXT: sub a0, a1, a2 +; RV32I-NEXT: .LBB15_2: +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: sext.w a1, a1 -; RV64I-NEXT: sext.w a0, a0 -; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: bgeu a0, a1, .LBB15_3 -; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: bgeu a1, a0, .LBB15_4 -; RV64I-NEXT: .LBB15_2: -; RV64I-NEXT: subw a0, a0, a2 -; RV64I-NEXT: ret -; RV64I-NEXT: .LBB15_3: -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu a1, a0, .LBB15_2 -; RV64I-NEXT: .LBB15_4: -; RV64I-NEXT: subw a0, a1, a2 +; RV64I-NEXT: slli a1, a1, 32 +; RV64I-NEXT: srli a1, a1, 32 +; RV64I-NEXT: slli a0, a0, 32 +; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: sub a0, a0, a1 +; RV64I-NEXT: srai a1, a0, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_minmax_i32: @@ -1047,11 +1047,13 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { ; ; RV64ZBB-LABEL: abd_minmax_i32: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: sext.w a1, a1 -; RV64ZBB-NEXT: sext.w a0, a0 +; RV64ZBB-NEXT: slli a1, a1, 32 +; RV64ZBB-NEXT: srli a1, a1, 32 +; RV64ZBB-NEXT: slli a0, a0, 32 +; RV64ZBB-NEXT: srli a0, a0, 32 ; RV64ZBB-NEXT: minu a2, a0, a1 ; RV64ZBB-NEXT: maxu a0, a0, a1 -; RV64ZBB-NEXT: subw a0, a0, a2 +; RV64ZBB-NEXT: sub a0, a0, a2 ; RV64ZBB-NEXT: ret %min = call i32 @llvm.umin.i32(i32 %a, i32 %b) %max = call i32 @llvm.umax.i32(i32 %a, i32 %b) @@ -1062,86 +1064,56 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; RV32I-LABEL: abd_minmax_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: beq a1, a3, .LBB16_2 +; RV32I-NEXT: sltu a4, a0, a2 +; RV32I-NEXT: sub a3, a1, a3 +; RV32I-NEXT: sub a3, a3, a4 +; RV32I-NEXT: sub a2, a0, a2 +; RV32I-NEXT: beq a3, a1, .LBB16_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: 
sltu a6, a1, a3 +; RV32I-NEXT: sltu a0, a1, a3 ; RV32I-NEXT: j .LBB16_3 ; RV32I-NEXT: .LBB16_2: -; RV32I-NEXT: sltu a6, a0, a2 +; RV32I-NEXT: sltu a0, a0, a2 ; RV32I-NEXT: .LBB16_3: -; RV32I-NEXT: mv a4, a1 -; RV32I-NEXT: mv a5, a0 -; RV32I-NEXT: bnez a6, .LBB16_5 -; RV32I-NEXT: # %bb.4: -; RV32I-NEXT: mv a4, a3 -; RV32I-NEXT: mv a5, a2 -; RV32I-NEXT: .LBB16_5: -; RV32I-NEXT: beq a1, a3, .LBB16_7 -; RV32I-NEXT: # %bb.6: -; RV32I-NEXT: sltu a6, a3, a1 -; RV32I-NEXT: beqz a6, .LBB16_8 -; RV32I-NEXT: j .LBB16_9 -; RV32I-NEXT: .LBB16_7: -; RV32I-NEXT: sltu a6, a2, a0 -; RV32I-NEXT: bnez a6, .LBB16_9 -; RV32I-NEXT: .LBB16_8: -; RV32I-NEXT: mv a1, a3 -; RV32I-NEXT: mv a0, a2 -; RV32I-NEXT: .LBB16_9: -; RV32I-NEXT: sltu a2, a0, a5 +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: xor a2, a2, a1 +; RV32I-NEXT: sltu a4, a2, a1 +; RV32I-NEXT: xor a1, a3, a1 +; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: sub a1, a1, a4 -; RV32I-NEXT: sub a1, a1, a2 -; RV32I-NEXT: sub a0, a0, a5 +; RV32I-NEXT: add a0, a2, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: mv a2, a0 -; RV64I-NEXT: bgeu a0, a1, .LBB16_3 +; RV64I-NEXT: bltu a1, a0, .LBB16_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: bgeu a1, a0, .LBB16_4 -; RV64I-NEXT: .LBB16_2: -; RV64I-NEXT: sub a0, a0, a2 +; RV64I-NEXT: sub a0, a1, a0 ; RV64I-NEXT: ret -; RV64I-NEXT: .LBB16_3: -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu a1, a0, .LBB16_2 -; RV64I-NEXT: .LBB16_4: -; RV64I-NEXT: sub a0, a1, a2 +; RV64I-NEXT: .LBB16_2: +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_minmax_i64: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: beq a1, a3, .LBB16_2 +; RV32ZBB-NEXT: sltu a4, a0, a2 +; RV32ZBB-NEXT: sub a3, a1, a3 +; RV32ZBB-NEXT: sub a3, a3, a4 +; RV32ZBB-NEXT: sub a2, a0, a2 +; RV32ZBB-NEXT: beq a3, a1, .LBB16_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu a6, a1, a3 +; RV32ZBB-NEXT: sltu a0, a1, a3 ; RV32ZBB-NEXT: j .LBB16_3 ; RV32ZBB-NEXT: .LBB16_2: -; RV32ZBB-NEXT: sltu a6, a0, a2 +; RV32ZBB-NEXT: sltu a0, a0, a2 ; RV32ZBB-NEXT: .LBB16_3: -; RV32ZBB-NEXT: mv a4, a1 -; RV32ZBB-NEXT: mv a5, a0 -; RV32ZBB-NEXT: bnez a6, .LBB16_5 -; RV32ZBB-NEXT: # %bb.4: -; RV32ZBB-NEXT: mv a4, a3 -; RV32ZBB-NEXT: mv a5, a2 -; RV32ZBB-NEXT: .LBB16_5: -; RV32ZBB-NEXT: beq a1, a3, .LBB16_7 -; RV32ZBB-NEXT: # %bb.6: -; RV32ZBB-NEXT: sltu a6, a3, a1 -; RV32ZBB-NEXT: beqz a6, .LBB16_8 -; RV32ZBB-NEXT: j .LBB16_9 -; RV32ZBB-NEXT: .LBB16_7: -; RV32ZBB-NEXT: sltu a6, a2, a0 -; RV32ZBB-NEXT: bnez a6, .LBB16_9 -; RV32ZBB-NEXT: .LBB16_8: -; RV32ZBB-NEXT: mv a1, a3 -; RV32ZBB-NEXT: mv a0, a2 -; RV32ZBB-NEXT: .LBB16_9: -; RV32ZBB-NEXT: sltu a2, a0, a5 +; RV32ZBB-NEXT: neg a1, a0 +; RV32ZBB-NEXT: xor a2, a2, a1 +; RV32ZBB-NEXT: sltu a4, a2, a1 +; RV32ZBB-NEXT: xor a1, a3, a1 +; RV32ZBB-NEXT: add a1, a1, a0 ; RV32ZBB-NEXT: sub a1, a1, a4 -; RV32ZBB-NEXT: sub a1, a1, a2 -; RV32ZBB-NEXT: sub a0, a0, a5 +; RV32ZBB-NEXT: add a0, a2, a0 ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_minmax_i64: @@ -1159,256 +1131,194 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind { ; RV32I-LABEL: abd_minmax_i128: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a6, 4(a2) -; RV32I-NEXT: lw a3, 4(a1) +; RV32I-NEXT: lw a5, 0(a2) +; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw t1, 12(a2) ; RV32I-NEXT: lw a7, 8(a2) -; RV32I-NEXT: lw t0, 12(a2) -; RV32I-NEXT: lw a5, 12(a1) ; RV32I-NEXT: lw a4, 8(a1) -; RV32I-NEXT: beq a5, t0, .LBB17_2 +; RV32I-NEXT: lw a6, 12(a1) +; RV32I-NEXT: lw t0, 4(a2) +; RV32I-NEXT: lw a1, 4(a1) +; RV32I-NEXT: sltu 
a2, a4, a7 +; RV32I-NEXT: sub t1, a6, t1 +; RV32I-NEXT: sltu t2, a3, a5 +; RV32I-NEXT: sub a2, t1, a2 +; RV32I-NEXT: mv t1, t2 +; RV32I-NEXT: beq a1, t0, .LBB17_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: sltu t1, a5, t0 -; RV32I-NEXT: j .LBB17_3 +; RV32I-NEXT: sltu t1, a1, t0 ; RV32I-NEXT: .LBB17_2: +; RV32I-NEXT: sub a7, a4, a7 +; RV32I-NEXT: sltu t3, a7, t1 +; RV32I-NEXT: sub a2, a2, t3 +; RV32I-NEXT: sub a7, a7, t1 +; RV32I-NEXT: beq a2, a6, .LBB17_4 +; RV32I-NEXT: # %bb.3: +; RV32I-NEXT: sltu t1, a6, a2 +; RV32I-NEXT: j .LBB17_5 +; RV32I-NEXT: .LBB17_4: ; RV32I-NEXT: sltu t1, a4, a7 -; RV32I-NEXT: .LBB17_3: -; RV32I-NEXT: lw t2, 0(a2) -; RV32I-NEXT: lw a1, 0(a1) -; RV32I-NEXT: beq a3, a6, .LBB17_5 -; RV32I-NEXT: # %bb.4: -; RV32I-NEXT: sltu t6, a3, a6 -; RV32I-NEXT: j .LBB17_6 ; RV32I-NEXT: .LBB17_5: -; RV32I-NEXT: sltu t6, a1, t2 -; RV32I-NEXT: .LBB17_6: -; RV32I-NEXT: xor a2, a5, t0 -; RV32I-NEXT: xor t3, a4, a7 -; RV32I-NEXT: or t5, t3, a2 -; RV32I-NEXT: beqz t5, .LBB17_8 -; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: mv t6, t1 +; RV32I-NEXT: sub t0, a1, t0 +; RV32I-NEXT: sub t0, t0, t2 +; RV32I-NEXT: sub a5, a3, a5 +; RV32I-NEXT: beq t0, a1, .LBB17_7 +; RV32I-NEXT: # %bb.6: +; RV32I-NEXT: sltu a1, a1, t0 +; RV32I-NEXT: j .LBB17_8 +; RV32I-NEXT: .LBB17_7: +; RV32I-NEXT: sltu a1, a3, a5 ; RV32I-NEXT: .LBB17_8: -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: mv t1, a3 -; RV32I-NEXT: mv t4, a5 -; RV32I-NEXT: mv t3, a4 -; RV32I-NEXT: bnez t6, .LBB17_10 +; RV32I-NEXT: xor a3, a2, a6 +; RV32I-NEXT: xor a4, a7, a4 +; RV32I-NEXT: or a3, a4, a3 +; RV32I-NEXT: beqz a3, .LBB17_10 ; RV32I-NEXT: # %bb.9: -; RV32I-NEXT: mv a2, t2 -; RV32I-NEXT: mv t1, a6 -; RV32I-NEXT: mv t4, t0 -; RV32I-NEXT: mv t3, a7 +; RV32I-NEXT: mv a1, t1 ; RV32I-NEXT: .LBB17_10: -; RV32I-NEXT: beq a5, t0, .LBB17_12 +; RV32I-NEXT: neg a6, a1 +; RV32I-NEXT: xor a3, a7, a6 +; RV32I-NEXT: sltu a4, a3, a6 +; RV32I-NEXT: xor a2, a2, a6 +; RV32I-NEXT: add a2, a2, a1 +; RV32I-NEXT: sub a4, a2, a4 +; RV32I-NEXT: xor a2, a5, a6 +; RV32I-NEXT: sltu a5, a2, a6 +; RV32I-NEXT: xor a7, t0, a6 +; RV32I-NEXT: mv t1, a5 +; RV32I-NEXT: beqz t0, .LBB17_12 ; RV32I-NEXT: # %bb.11: -; RV32I-NEXT: sltu t6, t0, a5 -; RV32I-NEXT: j .LBB17_13 +; RV32I-NEXT: sltu t1, a7, a6 ; RV32I-NEXT: .LBB17_12: -; RV32I-NEXT: sltu t6, a7, a4 -; RV32I-NEXT: .LBB17_13: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: beq a3, a6, .LBB17_15 -; RV32I-NEXT: # %bb.14: -; RV32I-NEXT: sltu s0, a6, a3 -; RV32I-NEXT: bnez t5, .LBB17_16 -; RV32I-NEXT: j .LBB17_17 -; RV32I-NEXT: .LBB17_15: -; RV32I-NEXT: sltu s0, t2, a1 -; RV32I-NEXT: beqz t5, .LBB17_17 -; RV32I-NEXT: .LBB17_16: -; RV32I-NEXT: mv s0, t6 -; RV32I-NEXT: .LBB17_17: -; RV32I-NEXT: bnez s0, .LBB17_19 -; RV32I-NEXT: # %bb.18: -; RV32I-NEXT: mv a1, t2 -; RV32I-NEXT: mv a3, a6 -; RV32I-NEXT: mv a5, t0 -; RV32I-NEXT: mv a4, a7 -; RV32I-NEXT: .LBB17_19: -; RV32I-NEXT: sltu a6, a4, t3 -; RV32I-NEXT: sub a7, a5, t4 -; RV32I-NEXT: sltu a5, a1, a2 -; RV32I-NEXT: sub a6, a7, a6 -; RV32I-NEXT: mv a7, a5 -; RV32I-NEXT: beq a3, t1, .LBB17_21 -; RV32I-NEXT: # %bb.20: -; RV32I-NEXT: sltu a7, a3, t1 -; RV32I-NEXT: .LBB17_21: -; RV32I-NEXT: sub a4, a4, t3 -; RV32I-NEXT: sltu t0, a4, a7 -; RV32I-NEXT: sub a6, a6, t0 -; RV32I-NEXT: sub a4, a4, a7 +; RV32I-NEXT: add a3, a3, a1 +; RV32I-NEXT: sltu a6, a3, t1 +; RV32I-NEXT: sub a4, a4, a6 ; RV32I-NEXT: sub a3, a3, t1 -; RV32I-NEXT: sub a3, a3, a5 -; RV32I-NEXT: sub a1, a1, a2 +; RV32I-NEXT: add a7, a7, a1 +; RV32I-NEXT: sub a5, a7, a5 +; RV32I-NEXT: add 
a1, a2, a1 ; RV32I-NEXT: sw a1, 0(a0) -; RV32I-NEXT: sw a3, 4(a0) -; RV32I-NEXT: sw a4, 8(a0) -; RV32I-NEXT: sw a6, 12(a0) -; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: sw a5, 4(a0) +; RV32I-NEXT: sw a3, 8(a0) +; RV32I-NEXT: sw a4, 12(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: abd_minmax_i128: ; RV64I: # %bb.0: -; RV64I-NEXT: beq a1, a3, .LBB17_2 +; RV64I-NEXT: sltu a4, a0, a2 +; RV64I-NEXT: sub a3, a1, a3 +; RV64I-NEXT: sub a3, a3, a4 +; RV64I-NEXT: sub a2, a0, a2 +; RV64I-NEXT: beq a3, a1, .LBB17_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sltu a6, a1, a3 +; RV64I-NEXT: sltu a0, a1, a3 ; RV64I-NEXT: j .LBB17_3 ; RV64I-NEXT: .LBB17_2: -; RV64I-NEXT: sltu a6, a0, a2 +; RV64I-NEXT: sltu a0, a0, a2 ; RV64I-NEXT: .LBB17_3: -; RV64I-NEXT: mv a4, a1 -; RV64I-NEXT: mv a5, a0 -; RV64I-NEXT: bnez a6, .LBB17_5 -; RV64I-NEXT: # %bb.4: -; RV64I-NEXT: mv a4, a3 -; RV64I-NEXT: mv a5, a2 -; RV64I-NEXT: .LBB17_5: -; RV64I-NEXT: beq a1, a3, .LBB17_7 -; RV64I-NEXT: # %bb.6: -; RV64I-NEXT: sltu a6, a3, a1 -; RV64I-NEXT: beqz a6, .LBB17_8 -; RV64I-NEXT: j .LBB17_9 -; RV64I-NEXT: .LBB17_7: -; RV64I-NEXT: sltu a6, a2, a0 -; RV64I-NEXT: bnez a6, .LBB17_9 -; RV64I-NEXT: .LBB17_8: -; RV64I-NEXT: mv a1, a3 -; RV64I-NEXT: mv a0, a2 -; RV64I-NEXT: .LBB17_9: -; RV64I-NEXT: sltu a2, a0, a5 +; RV64I-NEXT: neg a1, a0 +; RV64I-NEXT: xor a2, a2, a1 +; RV64I-NEXT: sltu a4, a2, a1 +; RV64I-NEXT: xor a1, a3, a1 +; RV64I-NEXT: add a1, a1, a0 ; RV64I-NEXT: sub a1, a1, a4 -; RV64I-NEXT: sub a1, a1, a2 -; RV64I-NEXT: sub a0, a0, a5 +; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: ret ; ; RV32ZBB-LABEL: abd_minmax_i128: ; RV32ZBB: # %bb.0: -; RV32ZBB-NEXT: lw a6, 4(a2) -; RV32ZBB-NEXT: lw a3, 4(a1) +; RV32ZBB-NEXT: lw a5, 0(a2) +; RV32ZBB-NEXT: lw a3, 0(a1) +; RV32ZBB-NEXT: lw t1, 12(a2) ; RV32ZBB-NEXT: lw a7, 8(a2) -; RV32ZBB-NEXT: lw t0, 12(a2) -; RV32ZBB-NEXT: lw a5, 12(a1) ; RV32ZBB-NEXT: lw a4, 8(a1) -; RV32ZBB-NEXT: beq a5, t0, .LBB17_2 +; RV32ZBB-NEXT: lw a6, 12(a1) +; RV32ZBB-NEXT: lw t0, 4(a2) +; RV32ZBB-NEXT: lw a1, 4(a1) +; RV32ZBB-NEXT: sltu a2, a4, a7 +; RV32ZBB-NEXT: sub t1, a6, t1 +; RV32ZBB-NEXT: sltu t2, a3, a5 +; RV32ZBB-NEXT: sub a2, t1, a2 +; RV32ZBB-NEXT: mv t1, t2 +; RV32ZBB-NEXT: beq a1, t0, .LBB17_2 ; RV32ZBB-NEXT: # %bb.1: -; RV32ZBB-NEXT: sltu t1, a5, t0 -; RV32ZBB-NEXT: j .LBB17_3 +; RV32ZBB-NEXT: sltu t1, a1, t0 ; RV32ZBB-NEXT: .LBB17_2: +; RV32ZBB-NEXT: sub a7, a4, a7 +; RV32ZBB-NEXT: sltu t3, a7, t1 +; RV32ZBB-NEXT: sub a2, a2, t3 +; RV32ZBB-NEXT: sub a7, a7, t1 +; RV32ZBB-NEXT: beq a2, a6, .LBB17_4 +; RV32ZBB-NEXT: # %bb.3: +; RV32ZBB-NEXT: sltu t1, a6, a2 +; RV32ZBB-NEXT: j .LBB17_5 +; RV32ZBB-NEXT: .LBB17_4: ; RV32ZBB-NEXT: sltu t1, a4, a7 -; RV32ZBB-NEXT: .LBB17_3: -; RV32ZBB-NEXT: lw t2, 0(a2) -; RV32ZBB-NEXT: lw a1, 0(a1) -; RV32ZBB-NEXT: beq a3, a6, .LBB17_5 -; RV32ZBB-NEXT: # %bb.4: -; RV32ZBB-NEXT: sltu t6, a3, a6 -; RV32ZBB-NEXT: j .LBB17_6 ; RV32ZBB-NEXT: .LBB17_5: -; RV32ZBB-NEXT: sltu t6, a1, t2 -; RV32ZBB-NEXT: .LBB17_6: -; RV32ZBB-NEXT: xor a2, a5, t0 -; RV32ZBB-NEXT: xor t3, a4, a7 -; RV32ZBB-NEXT: or t5, t3, a2 -; RV32ZBB-NEXT: beqz t5, .LBB17_8 -; RV32ZBB-NEXT: # %bb.7: -; RV32ZBB-NEXT: mv t6, t1 +; RV32ZBB-NEXT: sub t0, a1, t0 +; RV32ZBB-NEXT: sub t0, t0, t2 +; RV32ZBB-NEXT: sub a5, a3, a5 +; RV32ZBB-NEXT: beq t0, a1, .LBB17_7 +; RV32ZBB-NEXT: # %bb.6: +; RV32ZBB-NEXT: sltu a1, a1, t0 +; RV32ZBB-NEXT: j .LBB17_8 +; RV32ZBB-NEXT: .LBB17_7: +; RV32ZBB-NEXT: sltu a1, a3, a5 ; RV32ZBB-NEXT: .LBB17_8: -; RV32ZBB-NEXT: mv a2, a1 -; RV32ZBB-NEXT: mv 
t1, a3 -; RV32ZBB-NEXT: mv t4, a5 -; RV32ZBB-NEXT: mv t3, a4 -; RV32ZBB-NEXT: bnez t6, .LBB17_10 +; RV32ZBB-NEXT: xor a3, a2, a6 +; RV32ZBB-NEXT: xor a4, a7, a4 +; RV32ZBB-NEXT: or a3, a4, a3 +; RV32ZBB-NEXT: beqz a3, .LBB17_10 ; RV32ZBB-NEXT: # %bb.9: -; RV32ZBB-NEXT: mv a2, t2 -; RV32ZBB-NEXT: mv t1, a6 -; RV32ZBB-NEXT: mv t4, t0 -; RV32ZBB-NEXT: mv t3, a7 +; RV32ZBB-NEXT: mv a1, t1 ; RV32ZBB-NEXT: .LBB17_10: -; RV32ZBB-NEXT: beq a5, t0, .LBB17_12 +; RV32ZBB-NEXT: neg a6, a1 +; RV32ZBB-NEXT: xor a3, a7, a6 +; RV32ZBB-NEXT: sltu a4, a3, a6 +; RV32ZBB-NEXT: xor a2, a2, a6 +; RV32ZBB-NEXT: add a2, a2, a1 +; RV32ZBB-NEXT: sub a4, a2, a4 +; RV32ZBB-NEXT: xor a2, a5, a6 +; RV32ZBB-NEXT: sltu a5, a2, a6 +; RV32ZBB-NEXT: xor a7, t0, a6 +; RV32ZBB-NEXT: mv t1, a5 +; RV32ZBB-NEXT: beqz t0, .LBB17_12 ; RV32ZBB-NEXT: # %bb.11: -; RV32ZBB-NEXT: sltu t6, t0, a5 -; RV32ZBB-NEXT: j .LBB17_13 +; RV32ZBB-NEXT: sltu t1, a7, a6 ; RV32ZBB-NEXT: .LBB17_12: -; RV32ZBB-NEXT: sltu t6, a7, a4 -; RV32ZBB-NEXT: .LBB17_13: -; RV32ZBB-NEXT: addi sp, sp, -16 -; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill -; RV32ZBB-NEXT: beq a3, a6, .LBB17_15 -; RV32ZBB-NEXT: # %bb.14: -; RV32ZBB-NEXT: sltu s0, a6, a3 -; RV32ZBB-NEXT: bnez t5, .LBB17_16 -; RV32ZBB-NEXT: j .LBB17_17 -; RV32ZBB-NEXT: .LBB17_15: -; RV32ZBB-NEXT: sltu s0, t2, a1 -; RV32ZBB-NEXT: beqz t5, .LBB17_17 -; RV32ZBB-NEXT: .LBB17_16: -; RV32ZBB-NEXT: mv s0, t6 -; RV32ZBB-NEXT: .LBB17_17: -; RV32ZBB-NEXT: bnez s0, .LBB17_19 -; RV32ZBB-NEXT: # %bb.18: -; RV32ZBB-NEXT: mv a1, t2 -; RV32ZBB-NEXT: mv a3, a6 -; RV32ZBB-NEXT: mv a5, t0 -; RV32ZBB-NEXT: mv a4, a7 -; RV32ZBB-NEXT: .LBB17_19: -; RV32ZBB-NEXT: sltu a6, a4, t3 -; RV32ZBB-NEXT: sub a7, a5, t4 -; RV32ZBB-NEXT: sltu a5, a1, a2 -; RV32ZBB-NEXT: sub a6, a7, a6 -; RV32ZBB-NEXT: mv a7, a5 -; RV32ZBB-NEXT: beq a3, t1, .LBB17_21 -; RV32ZBB-NEXT: # %bb.20: -; RV32ZBB-NEXT: sltu a7, a3, t1 -; RV32ZBB-NEXT: .LBB17_21: -; RV32ZBB-NEXT: sub a4, a4, t3 -; RV32ZBB-NEXT: sltu t0, a4, a7 -; RV32ZBB-NEXT: sub a6, a6, t0 -; RV32ZBB-NEXT: sub a4, a4, a7 +; RV32ZBB-NEXT: add a3, a3, a1 +; RV32ZBB-NEXT: sltu a6, a3, t1 +; RV32ZBB-NEXT: sub a4, a4, a6 ; RV32ZBB-NEXT: sub a3, a3, t1 -; RV32ZBB-NEXT: sub a3, a3, a5 -; RV32ZBB-NEXT: sub a1, a1, a2 +; RV32ZBB-NEXT: add a7, a7, a1 +; RV32ZBB-NEXT: sub a5, a7, a5 +; RV32ZBB-NEXT: add a1, a2, a1 ; RV32ZBB-NEXT: sw a1, 0(a0) -; RV32ZBB-NEXT: sw a3, 4(a0) -; RV32ZBB-NEXT: sw a4, 8(a0) -; RV32ZBB-NEXT: sw a6, 12(a0) -; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload -; RV32ZBB-NEXT: addi sp, sp, 16 +; RV32ZBB-NEXT: sw a5, 4(a0) +; RV32ZBB-NEXT: sw a3, 8(a0) +; RV32ZBB-NEXT: sw a4, 12(a0) ; RV32ZBB-NEXT: ret ; ; RV64ZBB-LABEL: abd_minmax_i128: ; RV64ZBB: # %bb.0: -; RV64ZBB-NEXT: beq a1, a3, .LBB17_2 +; RV64ZBB-NEXT: sltu a4, a0, a2 +; RV64ZBB-NEXT: sub a3, a1, a3 +; RV64ZBB-NEXT: sub a3, a3, a4 +; RV64ZBB-NEXT: sub a2, a0, a2 +; RV64ZBB-NEXT: beq a3, a1, .LBB17_2 ; RV64ZBB-NEXT: # %bb.1: -; RV64ZBB-NEXT: sltu a6, a1, a3 +; RV64ZBB-NEXT: sltu a0, a1, a3 ; RV64ZBB-NEXT: j .LBB17_3 ; RV64ZBB-NEXT: .LBB17_2: -; RV64ZBB-NEXT: sltu a6, a0, a2 +; RV64ZBB-NEXT: sltu a0, a0, a2 ; RV64ZBB-NEXT: .LBB17_3: -; RV64ZBB-NEXT: mv a4, a1 -; RV64ZBB-NEXT: mv a5, a0 -; RV64ZBB-NEXT: bnez a6, .LBB17_5 -; RV64ZBB-NEXT: # %bb.4: -; RV64ZBB-NEXT: mv a4, a3 -; RV64ZBB-NEXT: mv a5, a2 -; RV64ZBB-NEXT: .LBB17_5: -; RV64ZBB-NEXT: beq a1, a3, .LBB17_7 -; RV64ZBB-NEXT: # %bb.6: -; RV64ZBB-NEXT: sltu a6, a3, a1 -; RV64ZBB-NEXT: beqz a6, .LBB17_8 -; RV64ZBB-NEXT: j .LBB17_9 -; RV64ZBB-NEXT: .LBB17_7: -; 
RV64ZBB-NEXT: sltu a6, a2, a0 -; RV64ZBB-NEXT: bnez a6, .LBB17_9 -; RV64ZBB-NEXT: .LBB17_8: -; RV64ZBB-NEXT: mv a1, a3 -; RV64ZBB-NEXT: mv a0, a2 -; RV64ZBB-NEXT: .LBB17_9: -; RV64ZBB-NEXT: sltu a2, a0, a5 +; RV64ZBB-NEXT: neg a1, a0 +; RV64ZBB-NEXT: xor a2, a2, a1 +; RV64ZBB-NEXT: sltu a4, a2, a1 +; RV64ZBB-NEXT: xor a1, a3, a1 +; RV64ZBB-NEXT: add a1, a1, a0 ; RV64ZBB-NEXT: sub a1, a1, a4 -; RV64ZBB-NEXT: sub a1, a1, a2 -; RV64ZBB-NEXT: sub a0, a0, a5 +; RV64ZBB-NEXT: add a0, a2, a0 ; RV64ZBB-NEXT: ret %min = call i128 @llvm.umin.i128(i128 %a, i128 %b) %max = call i128 @llvm.umax.i128(i128 %a, i128 %b) @@ -1802,3 +1712,5 @@ declare i8 @llvm.umin.i8(i8, i8) declare i16 @llvm.umin.i16(i16, i16) declare i32 @llvm.umin.i32(i32, i32) declare i64 @llvm.umin.i64(i64, i64) +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; NOZBB: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll index ddbfbd0b59fa4b..5e610c453e1bac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/abd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll @@ -26,13 +26,9 @@ define @sabd_b_promoted_ops( %a, %a to %b.sext = sext %b to @@ -158,13 +154,9 @@ define @uabd_b_promoted_ops( %a, %a to %b.zext = zext %b to diff --git a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll index cfbecd14604a21..1279714b5a78ca 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vabdus.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vabdus.ll @@ -171,8 +171,7 @@ define arm_aapcs_vfpcc <4 x i8> @vabd_v4u8(<4 x i8> %src1, <4 x i8> %src2) { ; CHECK-NEXT: vmov.i32 q2, #0xff ; CHECK-NEXT: vand q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 -; CHECK-NEXT: vsub.i32 q0, q0, q1 -; CHECK-NEXT: vabs.s32 q0, q0 +; CHECK-NEXT: vabd.u32 q0, q0, q1 ; CHECK-NEXT: bx lr %zextsrc1 = zext <4 x i8> %src1 to <4 x i16> %zextsrc2 = zext <4 x i8> %src2 to <4 x i16> diff --git a/llvm/test/CodeGen/X86/abds-neg.ll b/llvm/test/CodeGen/X86/abds-neg.ll index 753b1519c8f906..541ed1cbf4fe89 100644 --- a/llvm/test/CodeGen/X86/abds-neg.ll +++ b/llvm/test/CodeGen/X86/abds-neg.ll @@ -305,30 +305,27 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_ext_i64: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sarl $31, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl %ecx, %edx -; X86-NEXT: sarl $31, %edx -; X86-NEXT: subl {{[0-9]+}}(%esp), %eax -; X86-NEXT: sbbl %esi, %ecx -; X86-NEXT: movl %edx, %esi -; X86-NEXT: sbbl %edi, %esi -; X86-NEXT: sbbl %edi, %edx -; X86-NEXT: sarl $31, %edx -; X86-NEXT: xorl %edx, %ecx -; X86-NEXT: xorl %edx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl %edx, %edi +; X86-NEXT: subl %eax, %edi +; X86-NEXT: movl %esi, %ebx +; X86-NEXT: sbbl %ecx, %ebx ; X86-NEXT: subl %edx, %eax -; X86-NEXT: sbbl %edx, %ecx +; X86-NEXT: sbbl %esi, %ecx +; X86-NEXT: cmovll %ebx, %ecx +; X86-NEXT: cmovll %edi, %eax ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: negl %eax ; X86-NEXT: sbbl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi +; X86-NEXT: popl %ebx ; X86-NEXT: retl ; ; X64-LABEL: abd_ext_i64: @@ -352,30 +349,27 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_ext_i64_undef: ; X86: # %bb.0: +; X86-NEXT: pushl 
%ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sarl $31, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl %ecx, %edx -; X86-NEXT: sarl $31, %edx -; X86-NEXT: subl {{[0-9]+}}(%esp), %eax -; X86-NEXT: sbbl %esi, %ecx -; X86-NEXT: movl %edx, %esi -; X86-NEXT: sbbl %edi, %esi -; X86-NEXT: sbbl %edi, %edx -; X86-NEXT: sarl $31, %edx -; X86-NEXT: xorl %edx, %ecx -; X86-NEXT: xorl %edx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl %edx, %edi +; X86-NEXT: subl %eax, %edi +; X86-NEXT: movl %esi, %ebx +; X86-NEXT: sbbl %ecx, %ebx ; X86-NEXT: subl %edx, %eax -; X86-NEXT: sbbl %edx, %ecx +; X86-NEXT: sbbl %esi, %ecx +; X86-NEXT: cmovll %ebx, %ecx +; X86-NEXT: cmovll %edi, %eax ; X86-NEXT: xorl %edx, %edx ; X86-NEXT: negl %eax ; X86-NEXT: sbbl %ecx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi +; X86-NEXT: popl %ebx ; X86-NEXT: retl ; ; X64-LABEL: abd_ext_i64_undef: @@ -403,47 +397,41 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi +; X86-NEXT: pushl %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl %eax, %edi -; X86-NEXT: sarl $31, %edi -; X86-NEXT: movl %ebp, %ebx -; X86-NEXT: sarl $31, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: movl %eax, (%esp) # 4-byte Spill +; X86-NEXT: sbbl %edx, %ebp +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X86-NEXT: sbbl %edi, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: sbbl %esi, %eax ; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: sbbl %ebp, %eax -; X86-NEXT: movl %edi, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: movl %edi, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: movl %edi, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: sbbl %ebx, %edi -; X86-NEXT: sarl $31, %edi -; X86-NEXT: xorl %edi, %eax -; X86-NEXT: xorl %edi, %edx -; X86-NEXT: xorl %edi, %esi -; X86-NEXT: xorl %edi, %ecx -; X86-NEXT: subl %edi, %ecx -; X86-NEXT: sbbl %edi, %esi -; X86-NEXT: sbbl %edi, %edx -; X86-NEXT: sbbl %edi, %eax -; X86-NEXT: xorl %edi, %edi +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: cmovll %eax, %esi +; X86-NEXT: cmovll %ebx, %edi +; X86-NEXT: cmovll %ebp, %edx +; X86-NEXT: cmovll (%esp), %ecx # 4-byte Folded Reload +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: negl %ecx -; X86-NEXT: movl $0, %ebx +; X86-NEXT: movl $0, %ebp +; X86-NEXT: sbbl %edx, %ebp +; X86-NEXT: movl $0, %edx +; X86-NEXT: sbbl %edi, %edx ; X86-NEXT: sbbl %esi, %ebx -; X86-NEXT: movl $0, %esi -; X86-NEXT: sbbl %edx, %esi -; X86-NEXT: sbbl %eax, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl %ecx, (%eax) -; X86-NEXT: movl %ebx, 4(%eax) -; X86-NEXT: movl %esi, 8(%eax) -; X86-NEXT: movl %edi, 12(%eax) +; X86-NEXT: movl %ebp, 4(%eax) +; X86-NEXT: movl %edx, 8(%eax) +; X86-NEXT: movl %ebx, 12(%eax) +; X86-NEXT: addl $4, %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -453,23 +441,16 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X64-LABEL: 
abd_ext_i128: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rsi, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: sarq $63, %r8 ; X64-NEXT: subq %rdx, %rax -; X64-NEXT: sbbq %rcx, %rsi -; X64-NEXT: movq %rdi, %rcx -; X64-NEXT: sbbq %r8, %rcx -; X64-NEXT: sbbq %r8, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: xorq %rdi, %rsi -; X64-NEXT: xorq %rdi, %rax -; X64-NEXT: subq %rdi, %rax -; X64-NEXT: sbbq %rdi, %rsi +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: sbbq %rcx, %r8 +; X64-NEXT: subq %rdi, %rdx +; X64-NEXT: sbbq %rsi, %rcx +; X64-NEXT: cmovlq %r8, %rcx +; X64-NEXT: cmovgeq %rdx, %rax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: negq %rax -; X64-NEXT: sbbq %rsi, %rdx +; X64-NEXT: sbbq %rcx, %rdx ; X64-NEXT: retq %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -487,47 +468,41 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi +; X86-NEXT: pushl %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl %eax, %edi -; X86-NEXT: sarl $31, %edi -; X86-NEXT: movl %ebp, %ebx -; X86-NEXT: sarl $31, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: movl %eax, (%esp) # 4-byte Spill +; X86-NEXT: sbbl %edx, %ebp +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X86-NEXT: sbbl %edi, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: sbbl %esi, %eax ; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: sbbl %ebp, %eax -; X86-NEXT: movl %edi, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: movl %edi, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: movl %edi, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: sbbl %ebx, %edi -; X86-NEXT: sarl $31, %edi -; X86-NEXT: xorl %edi, %eax -; X86-NEXT: xorl %edi, %edx -; X86-NEXT: xorl %edi, %esi -; X86-NEXT: xorl %edi, %ecx -; X86-NEXT: subl %edi, %ecx -; X86-NEXT: sbbl %edi, %esi -; X86-NEXT: sbbl %edi, %edx -; X86-NEXT: sbbl %edi, %eax -; X86-NEXT: xorl %edi, %edi +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: cmovll %eax, %esi +; X86-NEXT: cmovll %ebx, %edi +; X86-NEXT: cmovll %ebp, %edx +; X86-NEXT: cmovll (%esp), %ecx # 4-byte Folded Reload +; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: negl %ecx -; X86-NEXT: movl $0, %ebx +; X86-NEXT: movl $0, %ebp +; X86-NEXT: sbbl %edx, %ebp +; X86-NEXT: movl $0, %edx +; X86-NEXT: sbbl %edi, %edx ; X86-NEXT: sbbl %esi, %ebx -; X86-NEXT: movl $0, %esi -; X86-NEXT: sbbl %edx, %esi -; X86-NEXT: sbbl %eax, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl %ecx, (%eax) -; X86-NEXT: movl %ebx, 4(%eax) -; X86-NEXT: movl %esi, 8(%eax) -; X86-NEXT: movl %edi, 12(%eax) +; X86-NEXT: movl %ebp, 4(%eax) +; X86-NEXT: movl %edx, 8(%eax) +; X86-NEXT: movl %ebx, 12(%eax) +; X86-NEXT: addl $4, %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -537,23 +512,16 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X64-LABEL: abd_ext_i128_undef: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rsi, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: sarq $63, %r8 ; X64-NEXT: subq %rdx, %rax -; X64-NEXT: sbbq %rcx, %rsi -; X64-NEXT: movq 
%rdi, %rcx -; X64-NEXT: sbbq %r8, %rcx -; X64-NEXT: sbbq %r8, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: xorq %rdi, %rsi -; X64-NEXT: xorq %rdi, %rax -; X64-NEXT: subq %rdi, %rax -; X64-NEXT: sbbq %rdi, %rsi +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: sbbq %rcx, %r8 +; X64-NEXT: subq %rdi, %rdx +; X64-NEXT: sbbq %rsi, %rcx +; X64-NEXT: cmovlq %r8, %rcx +; X64-NEXT: cmovgeq %rdx, %rax ; X64-NEXT: xorl %edx, %edx ; X64-NEXT: negq %rax -; X64-NEXT: sbbq %rsi, %rdx +; X64-NEXT: sbbq %rcx, %rdx ; X64-NEXT: retq %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 diff --git a/llvm/test/CodeGen/X86/abds.ll b/llvm/test/CodeGen/X86/abds.ll index 918079786a6366..fd47de5af05ab4 100644 --- a/llvm/test/CodeGen/X86/abds.ll +++ b/llvm/test/CodeGen/X86/abds.ll @@ -282,27 +282,24 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_ext_i64: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sarl $31, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl %edx, %ecx -; X86-NEXT: sarl $31, %ecx -; X86-NEXT: subl {{[0-9]+}}(%esp), %eax -; X86-NEXT: sbbl %esi, %edx -; X86-NEXT: movl %ecx, %esi -; X86-NEXT: sbbl %edi, %esi -; X86-NEXT: sbbl %edi, %ecx -; X86-NEXT: sarl $31, %ecx -; X86-NEXT: xorl %ecx, %edx -; X86-NEXT: xorl %ecx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl %ecx, %edi +; X86-NEXT: subl %eax, %edi +; X86-NEXT: movl %esi, %ebx +; X86-NEXT: sbbl %edx, %ebx ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: sbbl %ecx, %edx +; X86-NEXT: sbbl %esi, %edx +; X86-NEXT: cmovll %edi, %eax +; X86-NEXT: cmovll %ebx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi +; X86-NEXT: popl %ebx ; X86-NEXT: retl ; ; X64-LABEL: abd_ext_i64: @@ -324,27 +321,24 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_ext_i64_undef: ; X86: # %bb.0: +; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sarl $31, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl %edx, %ecx -; X86-NEXT: sarl $31, %ecx -; X86-NEXT: subl {{[0-9]+}}(%esp), %eax -; X86-NEXT: sbbl %esi, %edx -; X86-NEXT: movl %ecx, %esi -; X86-NEXT: sbbl %edi, %esi -; X86-NEXT: sbbl %edi, %ecx -; X86-NEXT: sarl $31, %ecx -; X86-NEXT: xorl %ecx, %edx -; X86-NEXT: xorl %ecx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl %ecx, %edi +; X86-NEXT: subl %eax, %edi +; X86-NEXT: movl %esi, %ebx +; X86-NEXT: sbbl %edx, %ebx ; X86-NEXT: subl %ecx, %eax -; X86-NEXT: sbbl %ecx, %edx +; X86-NEXT: sbbl %esi, %edx +; X86-NEXT: cmovll %edi, %eax +; X86-NEXT: cmovll %ebx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi +; X86-NEXT: popl %ebx ; X86-NEXT: retl ; ; X64-LABEL: abd_ext_i64_undef: @@ -370,40 +364,34 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: pushl %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx +; 
X86-NEXT: subl %edx, %eax +; X86-NEXT: movl %eax, (%esp) # 4-byte Spill +; X86-NEXT: sbbl %esi, %ebx ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; X86-NEXT: sbbl %ecx, %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %esi, %eax -; X86-NEXT: sarl $31, %eax -; X86-NEXT: movl %ebp, %ebx -; X86-NEXT: sarl $31, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: sbbl %edi, %eax ; X86-NEXT: subl {{[0-9]+}}(%esp), %edx +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi -; X86-NEXT: sbbl %ebp, %esi -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: sbbl %ebx, %eax -; X86-NEXT: sarl $31, %eax -; X86-NEXT: xorl %eax, %esi -; X86-NEXT: xorl %eax, %edi -; X86-NEXT: xorl %eax, %ecx -; X86-NEXT: xorl %eax, %edx -; X86-NEXT: subl %eax, %edx -; X86-NEXT: sbbl %eax, %ecx -; X86-NEXT: sbbl %eax, %edi -; X86-NEXT: sbbl %eax, %esi +; X86-NEXT: cmovll (%esp), %edx # 4-byte Folded Reload +; X86-NEXT: cmovll %ebx, %esi +; X86-NEXT: cmovll %ebp, %ecx +; X86-NEXT: cmovll %eax, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl %edi, 12(%eax) +; X86-NEXT: movl %ecx, 8(%eax) +; X86-NEXT: movl %esi, 4(%eax) ; X86-NEXT: movl %edx, (%eax) -; X86-NEXT: movl %ecx, 4(%eax) -; X86-NEXT: movl %edi, 8(%eax) -; X86-NEXT: movl %esi, 12(%eax) +; X86-NEXT: addl $4, %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -413,21 +401,14 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X64-LABEL: abd_ext_i128: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rsi, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: sarq $63, %r8 ; X64-NEXT: subq %rdx, %rax -; X64-NEXT: sbbq %rcx, %rsi -; X64-NEXT: movq %rdi, %rcx -; X64-NEXT: sbbq %r8, %rcx -; X64-NEXT: sbbq %r8, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: xorq %rdi, %rsi -; X64-NEXT: xorq %rdi, %rax -; X64-NEXT: subq %rdi, %rax -; X64-NEXT: sbbq %rdi, %rsi -; X64-NEXT: movq %rsi, %rdx +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: sbbq %rcx, %r8 +; X64-NEXT: subq %rdi, %rdx +; X64-NEXT: sbbq %rsi, %rcx +; X64-NEXT: cmovgeq %rdx, %rax +; X64-NEXT: cmovgeq %rcx, %r8 +; X64-NEXT: movq %r8, %rdx ; X64-NEXT: retq %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -444,40 +425,34 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: pushl %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X86-NEXT: subl %edx, %eax +; X86-NEXT: movl %eax, (%esp) # 4-byte Spill +; X86-NEXT: sbbl %esi, %ebx ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; X86-NEXT: sbbl %ecx, %ebp ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %esi, %eax -; X86-NEXT: sarl $31, %eax -; X86-NEXT: movl %ebp, %ebx -; X86-NEXT: sarl $31, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: sbbl %edi, %eax ; X86-NEXT: subl {{[0-9]+}}(%esp), %edx +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi -; X86-NEXT: sbbl %ebp, %esi -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: sbbl %ebx, 
%ebp -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: movl %eax, %ebp -; X86-NEXT: sbbl %ebx, %ebp -; X86-NEXT: sbbl %ebx, %eax -; X86-NEXT: sarl $31, %eax -; X86-NEXT: xorl %eax, %esi -; X86-NEXT: xorl %eax, %edi -; X86-NEXT: xorl %eax, %ecx -; X86-NEXT: xorl %eax, %edx -; X86-NEXT: subl %eax, %edx -; X86-NEXT: sbbl %eax, %ecx -; X86-NEXT: sbbl %eax, %edi -; X86-NEXT: sbbl %eax, %esi +; X86-NEXT: cmovll (%esp), %edx # 4-byte Folded Reload +; X86-NEXT: cmovll %ebx, %esi +; X86-NEXT: cmovll %ebp, %ecx +; X86-NEXT: cmovll %eax, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl %edi, 12(%eax) +; X86-NEXT: movl %ecx, 8(%eax) +; X86-NEXT: movl %esi, 4(%eax) ; X86-NEXT: movl %edx, (%eax) -; X86-NEXT: movl %ecx, 4(%eax) -; X86-NEXT: movl %edi, 8(%eax) -; X86-NEXT: movl %esi, 12(%eax) +; X86-NEXT: addl $4, %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -487,21 +462,14 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X64-LABEL: abd_ext_i128_undef: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq %rsi, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: movq %rcx, %r8 -; X64-NEXT: sarq $63, %r8 ; X64-NEXT: subq %rdx, %rax -; X64-NEXT: sbbq %rcx, %rsi -; X64-NEXT: movq %rdi, %rcx -; X64-NEXT: sbbq %r8, %rcx -; X64-NEXT: sbbq %r8, %rdi -; X64-NEXT: sarq $63, %rdi -; X64-NEXT: xorq %rdi, %rsi -; X64-NEXT: xorq %rdi, %rax -; X64-NEXT: subq %rdi, %rax -; X64-NEXT: sbbq %rdi, %rsi -; X64-NEXT: movq %rsi, %rdx +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: sbbq %rcx, %r8 +; X64-NEXT: subq %rdi, %rdx +; X64-NEXT: sbbq %rsi, %rcx +; X64-NEXT: cmovgeq %rdx, %rax +; X64-NEXT: cmovgeq %rcx, %r8 +; X64-NEXT: movq %r8, %rdx ; X64-NEXT: retq %aext = sext i128 %a to i256 %bext = sext i128 %b to i256 @@ -602,7 +570,6 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_minmax_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi @@ -610,24 +577,17 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: movl %edx, %edi -; X86-NEXT: cmovll %esi, %edi -; X86-NEXT: movl %eax, %ebx -; X86-NEXT: cmovll %ecx, %ebx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %ebp -; X86-NEXT: sbbl %esi, %ebp -; X86-NEXT: cmovll %esi, %edx -; X86-NEXT: cmovll %ecx, %eax -; X86-NEXT: subl %ebx, %eax -; X86-NEXT: sbbl %edi, %edx +; X86-NEXT: movl %ecx, %edi +; X86-NEXT: subl %eax, %edi +; X86-NEXT: movl %esi, %ebx +; X86-NEXT: sbbl %edx, %ebx +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: sbbl %esi, %edx +; X86-NEXT: cmovll %edi, %eax +; X86-NEXT: cmovll %ebx, %edx ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl ; ; X64-LABEL: abd_minmax_i64: @@ -651,53 +611,34 @@ define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: subl $8, %esp +; X86-NEXT: pushl %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpl %edx, {{[0-9]+}}(%esp) -; X86-NEXT: sbbl %ebx, %eax -; 
X86-NEXT: movl %esi, %eax -; X86-NEXT: sbbl %ecx, %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp -; X86-NEXT: movl %ebp, %eax -; X86-NEXT: sbbl %edi, %eax -; X86-NEXT: movl %edi, %eax -; X86-NEXT: cmovll %ebp, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: cmovll %esi, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx +; X86-NEXT: subl %edx, %eax ; X86-NEXT: movl %eax, (%esp) # 4-byte Spill -; X86-NEXT: movl %ebx, %ebp +; X86-NEXT: sbbl %esi, %ebx +; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp +; X86-NEXT: sbbl %ecx, %ebp +; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmovll %eax, %ebp -; X86-NEXT: movl %edx, %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmovll %esi, %eax -; X86-NEXT: cmpl %esi, %edx -; X86-NEXT: movl %ebx, %esi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %ecx, %esi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %edi, %esi +; X86-NEXT: sbbl %edi, %eax +; X86-NEXT: subl {{[0-9]+}}(%esp), %edx ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmovll {{[0-9]+}}(%esp), %edi -; X86-NEXT: cmovll {{[0-9]+}}(%esp), %ecx -; X86-NEXT: cmovll {{[0-9]+}}(%esp), %ebx -; X86-NEXT: cmovll {{[0-9]+}}(%esp), %edx -; X86-NEXT: subl %eax, %edx -; X86-NEXT: sbbl %ebp, %ebx -; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload -; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi +; X86-NEXT: cmovll (%esp), %edx # 4-byte Folded Reload +; X86-NEXT: cmovll %ebx, %esi +; X86-NEXT: cmovll %ebp, %ecx +; X86-NEXT: cmovll %eax, %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl %edx, (%eax) -; X86-NEXT: movl %ebx, 4(%eax) -; X86-NEXT: movl %ecx, 8(%eax) ; X86-NEXT: movl %edi, 12(%eax) -; X86-NEXT: addl $8, %esp +; X86-NEXT: movl %ecx, 8(%eax) +; X86-NEXT: movl %esi, 4(%eax) +; X86-NEXT: movl %edx, (%eax) +; X86-NEXT: addl $4, %esp ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -706,22 +647,15 @@ define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind { ; ; X64-LABEL: abd_minmax_i128: ; X64: # %bb.0: -; X64-NEXT: movq %rdx, %rax -; X64-NEXT: cmpq %rdx, %rdi -; X64-NEXT: movq %rsi, %rdx -; X64-NEXT: sbbq %rcx, %rdx -; X64-NEXT: movq %rcx, %rdx -; X64-NEXT: cmovlq %rsi, %rdx -; X64-NEXT: movq %rax, %r8 -; X64-NEXT: cmovlq %rdi, %r8 -; X64-NEXT: cmpq %rdi, %rax -; X64-NEXT: movq %rcx, %r9 -; X64-NEXT: sbbq %rsi, %r9 -; X64-NEXT: cmovlq %rsi, %rcx -; X64-NEXT: cmovlq %rdi, %rax -; X64-NEXT: subq %r8, %rax -; X64-NEXT: sbbq %rdx, %rcx -; X64-NEXT: movq %rcx, %rdx +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: subq %rdx, %rax +; X64-NEXT: movq %rsi, %r8 +; X64-NEXT: sbbq %rcx, %r8 +; X64-NEXT: subq %rdi, %rdx +; X64-NEXT: sbbq %rsi, %rcx +; X64-NEXT: cmovgeq %rdx, %rax +; X64-NEXT: cmovgeq %rcx, %r8 +; X64-NEXT: movq %r8, %rdx ; X64-NEXT: retq %min = call i128 @llvm.smin.i128(i128 %a, i128 %b) %max = call i128 @llvm.smax.i128(i128 %a, i128 %b) diff --git a/llvm/test/CodeGen/X86/abdu-neg.ll b/llvm/test/CodeGen/X86/abdu-neg.ll index 48a8575acfacf5..fe4525009ad5c8 100644 --- a/llvm/test/CodeGen/X86/abdu-neg.ll +++ b/llvm/test/CodeGen/X86/abdu-neg.ll @@ -311,8 +311,6 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl $0, %esi ; X86-NEXT: sbbl %esi, %esi -; X86-NEXT: movl $0, %esi -; X86-NEXT: sbbl %esi, %esi ; X86-NEXT: xorl %esi, %ecx ; 
X86-NEXT: xorl %esi, %eax ; X86-NEXT: subl %esi, %eax @@ -351,8 +349,6 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl $0, %esi ; X86-NEXT: sbbl %esi, %esi -; X86-NEXT: movl $0, %esi -; X86-NEXT: sbbl %esi, %esi ; X86-NEXT: xorl %esi, %ecx ; X86-NEXT: xorl %esi, %eax ; X86-NEXT: subl %esi, %eax @@ -387,42 +383,36 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X86-NEXT: xorl %ecx, %ecx -; X86-NEXT: subl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: xorl %edi, %edi +; X86-NEXT: subl {{[0-9]+}}(%esp), %edx ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ebx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl $0, %ebp ; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: xorl %ebp, %edx -; X86-NEXT: xorl %ebp, %edi -; X86-NEXT: xorl %ebp, %ebx +; X86-NEXT: xorl %ebp, %ecx ; X86-NEXT: xorl %ebp, %esi -; X86-NEXT: subl %ebp, %esi +; X86-NEXT: xorl %ebp, %ebx +; X86-NEXT: xorl %ebp, %edx +; X86-NEXT: subl %ebp, %edx ; X86-NEXT: sbbl %ebp, %ebx -; X86-NEXT: sbbl %ebp, %edi -; X86-NEXT: sbbl %ebp, %edx -; X86-NEXT: negl %esi +; X86-NEXT: sbbl %ebp, %esi +; X86-NEXT: sbbl %ebp, %ecx +; X86-NEXT: negl %edx ; X86-NEXT: movl $0, %ebp ; X86-NEXT: sbbl %ebx, %ebp ; X86-NEXT: movl $0, %ebx -; X86-NEXT: sbbl %edi, %ebx -; X86-NEXT: sbbl %edx, %ecx -; X86-NEXT: movl %esi, (%eax) +; X86-NEXT: sbbl %esi, %ebx +; X86-NEXT: sbbl %ecx, %edi +; X86-NEXT: movl %edx, (%eax) ; X86-NEXT: movl %ebp, 4(%eax) ; X86-NEXT: movl %ebx, 8(%eax) -; X86-NEXT: movl %ecx, 12(%eax) +; X86-NEXT: movl %edi, 12(%eax) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -437,8 +427,6 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X64-NEXT: sbbq %rcx, %rsi ; X64-NEXT: movl $0, %ecx ; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: movl $0, %ecx -; X64-NEXT: sbbq %rcx, %rcx ; X64-NEXT: xorq %rcx, %rsi ; X64-NEXT: xorq %rcx, %rax ; X64-NEXT: subq %rcx, %rax @@ -463,42 +451,36 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X86-NEXT: xorl %ecx, %ecx -; X86-NEXT: subl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: xorl %edi, %edi +; X86-NEXT: subl {{[0-9]+}}(%esp), %edx ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ebx -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; 
X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl $0, %ebp ; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: xorl %ebp, %edx -; X86-NEXT: xorl %ebp, %edi -; X86-NEXT: xorl %ebp, %ebx +; X86-NEXT: xorl %ebp, %ecx ; X86-NEXT: xorl %ebp, %esi -; X86-NEXT: subl %ebp, %esi +; X86-NEXT: xorl %ebp, %ebx +; X86-NEXT: xorl %ebp, %edx +; X86-NEXT: subl %ebp, %edx ; X86-NEXT: sbbl %ebp, %ebx -; X86-NEXT: sbbl %ebp, %edi -; X86-NEXT: sbbl %ebp, %edx -; X86-NEXT: negl %esi +; X86-NEXT: sbbl %ebp, %esi +; X86-NEXT: sbbl %ebp, %ecx +; X86-NEXT: negl %edx ; X86-NEXT: movl $0, %ebp ; X86-NEXT: sbbl %ebx, %ebp ; X86-NEXT: movl $0, %ebx -; X86-NEXT: sbbl %edi, %ebx -; X86-NEXT: sbbl %edx, %ecx -; X86-NEXT: movl %esi, (%eax) +; X86-NEXT: sbbl %esi, %ebx +; X86-NEXT: sbbl %ecx, %edi +; X86-NEXT: movl %edx, (%eax) ; X86-NEXT: movl %ebp, 4(%eax) ; X86-NEXT: movl %ebx, 8(%eax) -; X86-NEXT: movl %ecx, 12(%eax) +; X86-NEXT: movl %edi, 12(%eax) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx @@ -513,8 +495,6 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X64-NEXT: sbbq %rcx, %rsi ; X64-NEXT: movl $0, %ecx ; X64-NEXT: sbbq %rcx, %rcx -; X64-NEXT: movl $0, %ecx -; X64-NEXT: sbbq %rcx, %rcx ; X64-NEXT: xorq %rcx, %rsi ; X64-NEXT: xorq %rcx, %rax ; X64-NEXT: subq %rcx, %rax diff --git a/llvm/test/CodeGen/X86/abdu.ll b/llvm/test/CodeGen/X86/abdu.ll index 87681707023600..2e2a1debc38cb1 100644 --- a/llvm/test/CodeGen/X86/abdu.ll +++ b/llvm/test/CodeGen/X86/abdu.ll @@ -280,20 +280,16 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind { define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_ext_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: xorl %ecx, %ecx ; X86-NEXT: subl {{[0-9]+}}(%esp), %eax ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl $0, %esi -; X86-NEXT: sbbl %esi, %esi ; X86-NEXT: sbbl %ecx, %ecx ; X86-NEXT: xorl %ecx, %edx ; X86-NEXT: xorl %ecx, %eax ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: sbbl %ecx, %edx -; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: abd_ext_i64: @@ -315,20 +311,16 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind { define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_ext_i64_undef: ; X86: # %bb.0: -; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: xorl %ecx, %ecx ; X86-NEXT: subl {{[0-9]+}}(%esp), %eax ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl $0, %esi -; X86-NEXT: sbbl %esi, %esi ; X86-NEXT: sbbl %ecx, %ecx ; X86-NEXT: xorl %ecx, %edx ; X86-NEXT: xorl %ecx, %eax ; X86-NEXT: subl %ecx, %eax ; X86-NEXT: sbbl %ecx, %edx -; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: abd_ext_i64_undef: @@ -350,26 +342,19 @@ define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind { define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X86-LABEL: abd_ext_i128: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: subl {{[0-9]+}}(%esp), %edi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; 
X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp ; X86-NEXT: sbbl %ebx, %ebx ; X86-NEXT: xorl %ebx, %ecx ; X86-NEXT: xorl %ebx, %edx @@ -386,7 +371,6 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl $4 ; ; X64-LABEL: abd_ext_i128: @@ -395,8 +379,6 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { ; X64-NEXT: xorl %edi, %edi ; X64-NEXT: subq %rdx, %rax ; X64-NEXT: sbbq %rcx, %rsi -; X64-NEXT: movl $0, %ecx -; X64-NEXT: sbbq %rcx, %rcx ; X64-NEXT: sbbq %rdi, %rdi ; X64-NEXT: xorq %rdi, %rsi ; X64-NEXT: xorq %rdi, %rax @@ -415,26 +397,19 @@ define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind { define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X86-LABEL: abd_ext_i128_undef: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: xorl %ebx, %ebx ; X86-NEXT: subl {{[0-9]+}}(%esp), %edi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp -; X86-NEXT: movl $0, %ebp -; X86-NEXT: sbbl %ebp, %ebp ; X86-NEXT: sbbl %ebx, %ebx ; X86-NEXT: xorl %ebx, %ecx ; X86-NEXT: xorl %ebx, %edx @@ -451,7 +426,6 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl $4 ; ; X64-LABEL: abd_ext_i128_undef: @@ -460,8 +434,6 @@ define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind { ; X64-NEXT: xorl %edi, %edi ; X64-NEXT: subq %rdx, %rax ; X64-NEXT: sbbq %rcx, %rsi -; X64-NEXT: movl $0, %ecx -; X64-NEXT: sbbq %rcx, %rcx ; X64-NEXT: sbbq %rdi, %rdi ; X64-NEXT: xorq %rdi, %rsi ; X64-NEXT: xorq %rdi, %rax @@ -568,32 +540,16 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind { define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { ; X86-LABEL: abd_minmax_i64: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp -; X86-NEXT: pushl %ebx -; X86-NEXT: pushl %edi -; X86-NEXT: pushl %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmpl %eax, %ecx -; X86-NEXT: movl %esi, %edi -; X86-NEXT: sbbl %edx, %edi -; X86-NEXT: movl %edx, %edi -; X86-NEXT: cmovbl %esi, %edi -; X86-NEXT: movl %eax, %ebx -; X86-NEXT: cmovbl %ecx, %ebx -; X86-NEXT: cmpl %ecx, %eax -; X86-NEXT: movl %edx, %ebp -; X86-NEXT: sbbl %esi, %ebp -; X86-NEXT: cmovbl %esi, %edx -; X86-NEXT: cmovbl %ecx, %eax -; X86-NEXT: subl %ebx, %eax -; X86-NEXT: sbbl %edi, %edx -; X86-NEXT: popl %esi -; X86-NEXT: popl %edi -; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp +; X86-NEXT: xorl %ecx, %ecx +; X86-NEXT: subl {{[0-9]+}}(%esp), %eax +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx +; X86-NEXT: sbbl %ecx, %ecx +; X86-NEXT: xorl %ecx, %edx +; X86-NEXT: xorl %ecx, %eax +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: sbbl %ecx, %edx ; X86-NEXT: retl ; ; X64-LABEL: abd_minmax_i64: @@ -613,81 +569,49 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind { define i128 @abd_minmax_i128(i128 %a, 
i128 %b) nounwind { ; X86-LABEL: abd_minmax_i128: ; X86: # %bb.0: -; X86-NEXT: pushl %ebp ; X86-NEXT: pushl %ebx ; X86-NEXT: pushl %edi ; X86-NEXT: pushl %esi -; X86-NEXT: subl $8, %esp -; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmpl %edx, {{[0-9]+}}(%esp) -; X86-NEXT: sbbl %ebx, %eax -; X86-NEXT: movl %esi, %eax -; X86-NEXT: sbbl %ecx, %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi -; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp -; X86-NEXT: movl %ebp, %eax -; X86-NEXT: sbbl %edi, %eax -; X86-NEXT: movl %edi, %eax -; X86-NEXT: cmovbl %ebp, %eax -; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-NEXT: movl %ecx, %eax -; X86-NEXT: cmovbl %esi, %eax -; X86-NEXT: movl %eax, (%esp) # 4-byte Spill -; X86-NEXT: movl %ebx, %ebp -; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: cmovbl %eax, %ebp -; X86-NEXT: movl %edx, %eax ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmovbl %esi, %eax -; X86-NEXT: cmpl %esi, %edx -; X86-NEXT: movl %ebx, %esi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %ecx, %esi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi -; X86-NEXT: movl %edi, %esi -; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi -; X86-NEXT: cmovbl {{[0-9]+}}(%esp), %edi -; X86-NEXT: cmovbl {{[0-9]+}}(%esp), %ecx -; X86-NEXT: cmovbl {{[0-9]+}}(%esp), %ebx -; X86-NEXT: cmovbl {{[0-9]+}}(%esp), %edx -; X86-NEXT: subl %eax, %edx -; X86-NEXT: sbbl %ebp, %ebx -; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload -; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax -; X86-NEXT: movl %edx, (%eax) -; X86-NEXT: movl %ebx, 4(%eax) -; X86-NEXT: movl %ecx, 8(%eax) -; X86-NEXT: movl %edi, 12(%eax) -; X86-NEXT: addl $8, %esp +; X86-NEXT: xorl %ebx, %ebx +; X86-NEXT: subl {{[0-9]+}}(%esp), %edi +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %esi +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx +; X86-NEXT: sbbl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: sbbl %ebx, %ebx +; X86-NEXT: xorl %ebx, %ecx +; X86-NEXT: xorl %ebx, %edx +; X86-NEXT: xorl %ebx, %esi +; X86-NEXT: xorl %ebx, %edi +; X86-NEXT: subl %ebx, %edi +; X86-NEXT: sbbl %ebx, %esi +; X86-NEXT: sbbl %ebx, %edx +; X86-NEXT: sbbl %ebx, %ecx +; X86-NEXT: movl %edi, (%eax) +; X86-NEXT: movl %esi, 4(%eax) +; X86-NEXT: movl %edx, 8(%eax) +; X86-NEXT: movl %ecx, 12(%eax) ; X86-NEXT: popl %esi ; X86-NEXT: popl %edi ; X86-NEXT: popl %ebx -; X86-NEXT: popl %ebp ; X86-NEXT: retl $4 ; ; X64-LABEL: abd_minmax_i128: ; X64: # %bb.0: -; X64-NEXT: movq %rdx, %rax -; X64-NEXT: cmpq %rdx, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: xorl %edi, %edi +; X64-NEXT: subq %rdx, %rax +; X64-NEXT: sbbq %rcx, %rsi +; X64-NEXT: sbbq %rdi, %rdi +; X64-NEXT: xorq %rdi, %rsi +; X64-NEXT: xorq %rdi, %rax +; X64-NEXT: subq %rdi, %rax +; X64-NEXT: sbbq %rdi, %rsi ; X64-NEXT: movq %rsi, %rdx -; X64-NEXT: sbbq %rcx, %rdx -; X64-NEXT: movq %rcx, %rdx -; X64-NEXT: cmovbq %rsi, %rdx -; X64-NEXT: movq %rax, %r8 -; X64-NEXT: cmovbq %rdi, %r8 -; X64-NEXT: cmpq %rdi, %rax -; X64-NEXT: movq %rcx, %r9 -; X64-NEXT: sbbq %rsi, %r9 -; X64-NEXT: cmovbq %rsi, %rcx -; X64-NEXT: cmovbq %rdi, %rax -; X64-NEXT: subq %r8, %rax -; X64-NEXT: sbbq %rdx, %rcx -; X64-NEXT: movq %rcx, %rdx ; X64-NEXT: retq %min = call i128 @llvm.umin.i128(i128 %a, i128 %b) %max = call i128 @llvm.umax.i128(i128 %a, i128 %b) From 8868c02cda875d1efe1646affa01656ef268ffed Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?Andrzej=20Warzy=C5=84ski?= Date: Tue, 6 Aug 2024 10:57:10 +0100 Subject: [PATCH 07/10] [mlir][linalg] Relax tensor.extract vectorization (#99299) Simplifies the vectorization of tensor.extract so that: * all cases that read into a genuinely multi-dim vector (*) are considered a gather load, * all other cases are considered as potential contiguous loads. This change means that the following extraction from a "column" tensor will be correctly identified as a scalar load followed by a broadcast (rather than a gather load). ```mlir func.func @vectorize_scalar_broadcast_column_tensor(%in: tensor<1x1x4xi32>) -> tensor<1x1x4xi32> { %c4 = arith.constant 4 : index %c0 = arith.constant 0 : index %cst = arith.constant dense<[...]> : tensor<15x1xi32> %out = linalg.generic { indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} outs(%in : tensor<1x1x4xi32>) { ^bb0(%out: i32): %idx_0 = linalg.index 0 : index %extracted = tensor.extract %cst[%idx_0, %c0] : tensor<15x1xi32> linalg.yield %extracted : i32 } -> tensor<1x1x4xi32> return %out:tensor<1x1x4xi32> } ``` (*) `vector<1x4x1xf32>` is considered as 1D vector in this context. --- .../Linalg/Transforms/Vectorization.cpp | 35 +++++------- .../Linalg/vectorize-tensor-extract.mlir | 56 +++++++++++++++++++ 2 files changed, 71 insertions(+), 20 deletions(-) diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index 3d0d6abf702d70..6da886f5ec19e1 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -946,27 +946,22 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp, if (linalgOp.hasDynamicShape()) return VectorMemoryAccessKind::Gather; - // 1. Assume that it's a gather load when reading _into_: - // * an n-D "vector", like `tensor<1x2x4xi32` or `tensor<2x1x4xi32>`, or - // * a 1-D "vector" with the trailing dim equal 1, e.g. `tensor<1x4x1xi32`. - // TODO: Relax these conditions. - // FIXME: This condition assumes non-dynamic sizes. - if ((llvm::count_if(targetShape, - [](int64_t dimSize) { return dimSize > 1; }) != 1) || - targetShape.back() == 1) - return VectorMemoryAccessKind::Gather; - - // 2. Assume that it's a gather load when reading _from_ a tensor for which - // the trailing dimension is 1, e.g. `tensor<1x4x1xi32>`. - // TODO: Relax this condition. - if (inputShape.getShape().back() == 1) + // True for vectors that are effectively 1D, e.g. `vector<1x4x1xi32>`, false + // otherwise. + bool isOutput1DVector = (llvm::count_if(targetShape, [](int64_t dimSize) { + return dimSize > 1; + }) == 1); + + // 1. Assume that it's a gather load when reading non-1D vector. + if (!isOutput1DVector) return VectorMemoryAccessKind::Gather; bool leadingIdxsLoopInvariant = true; - // 3. Analyze the leading indices of `extractOp`. + // 2. Analyze the leading indices of `extractOp`. // Look at the way each index is calculated and decide whether it is suitable - // for a contiguous load, i.e. whether it's loop invariant. + // for a contiguous load, i.e. whether it's loop invariant. If not, it's a + // gather load. auto indices = extractOp.getIndices(); auto leadIndices = indices.drop_back(1); @@ -982,13 +977,13 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp, return VectorMemoryAccessKind::Gather; } - // 4. Analyze the trailing index for `extractOp`. + // 3. Analyze the trailing index for `extractOp`. 
// At this point we know that the leading indices are loop invariant. This // means that is potentially a scalar or a contiguous load. We can decide // based on the trailing idx. auto extractOpTrailingIdx = indices.back(); - // 4a. Scalar broadcast load + // 3a. Scalar broadcast load // If the trailing index is loop invariant then this is a scalar load. if (leadingIdxsLoopInvariant && isLoopInvariantIdx(linalgOp, extractOpTrailingIdx)) { @@ -997,7 +992,7 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp, return VectorMemoryAccessKind::ScalarBroadcast; } - // 4b. Contiguous loads + // 3b. Contiguous loads // The trailing `extractOp` index should increment with every loop iteration. // This effectively means that it must be based on the trailing loop index. // This is what the following bool captures. @@ -1011,7 +1006,7 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp, return VectorMemoryAccessKind::Contiguous; } - // 5. Fallback case - gather load. + // 4. Fallback case - gather load. LDBG("Found gather load: " << extractOp); return VectorMemoryAccessKind::Gather; } diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir index 85e1c56dd45a0d..ac75a19cbeb28e 100644 --- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir +++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir @@ -595,3 +595,59 @@ module attributes {transform.with_named_sequence} { transform.yield } } + + +// ----- + +func.func @vectorize_scalar_broadcast_column_tensor(%in: tensor<1x1x4xi32>) -> tensor<1x1x4xi32> { + %c4 = arith.constant 4 : index + %c0 = arith.constant 0 : index + %cst = arith.constant dense<[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14]]> : tensor<15x1xi32> + + %out = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} outs(%in : tensor<1x1x4xi32>) { + ^bb0(%out: i32): + %8 = linalg.index 0 : index + %idx_0 = linalg.index 0 : index + %extracted = tensor.extract %cst[%idx_0, %c0] : tensor<15x1xi32> + linalg.yield %extracted : i32 + } -> tensor<1x1x4xi32> + + return %out:tensor<1x1x4xi32> +} + +// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1) -> (0, 0, 0)> +// CHECK-LABEL: func.func @vectorize_scalar_broadcast_column_tensor( +// CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x4xi32>) -> tensor<1x1x4xi32> { +// CHECK: %[[VAL_1:.*]] = arith.constant 4 : index +// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index +// CHECK: %[[VAL_3:.*]] = arith.constant dense<{{\[\[}}0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14]]> : tensor<15x1xi32> +// CHECK: %[[VAL_4:.*]] = arith.constant 1 : index +// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index +// CHECK: %[[VAL_6:.*]] = arith.constant 4 : index +// CHECK: %[[VAL_7:.*]] = arith.constant 0 : index +// CHECK: %[[VAL_8:.*]] = arith.constant 0 : i32 +// CHECK: %[[VAL_9:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_7]], %[[VAL_7]], %[[VAL_7]]], %[[VAL_8]] : tensor<1x1x4xi32>, vector<1x1x4xi32> +// CHECK: %[[VAL_10:.*]] = vector.step : vector<1xindex> +// CHECK: %[[VAL_11:.*]] = vector.broadcast %[[VAL_10]] : vector<1xindex> to vector<4x1x1xindex> +// CHECK: %[[VAL_12:.*]] = vector.transpose %[[VAL_11]], [2, 1, 0] : vector<4x1x1xindex> to vector<1x1x4xindex> +// CHECK: %[[VAL_13:.*]] = vector.step : vector<1xindex> +// CHECK: %[[VAL_14:.*]] = vector.broadcast %[[VAL_13]] : vector<1xindex> to vector<4x1x1xindex> +// CHECK: 
%[[VAL_15:.*]] = vector.transpose %[[VAL_14]], [2, 1, 0] : vector<4x1x1xindex> to vector<1x1x4xindex> +// CHECK: %[[VAL_16:.*]] = arith.constant dense : vector<1x1x4xi1> +// CHECK: %[[VAL_17:.*]] = arith.constant dense<0> : vector<1x1x4xi32> +// CHECK: %[[VAL_18:.*]] = arith.constant 0 : index +// CHECK: %[[VAL_19:.*]] = arith.constant 0 : i32 +// CHECK: %[[VAL_20:.*]] = vector.shape_cast %[[VAL_15]] : vector<1x1x4xindex> to vector<4xindex> +// CHECK: %[[VAL_21:.*]] = vector.extractelement %[[VAL_20]]{{\[}}%[[VAL_19]] : i32] : vector<4xindex> +// CHECK: %[[VAL_22:.*]] = arith.constant 0 : i32 +// CHECK: %[[VAL_23:.*]] = vector.transfer_read %[[VAL_3]]{{\[}}%[[VAL_21]], %[[VAL_2]]], %[[VAL_22]] {in_bounds = [true, true, true], permutation_map = #[[$ATTR_1]]} : tensor<15x1xi32>, vector<1x1x4xi32> +// CHECK: %[[VAL_24:.*]] = arith.constant 0 : index +// CHECK: %[[VAL_25:.*]] = vector.transfer_write %[[VAL_23]], %[[VAL_0]]{{\[}}%[[VAL_24]], %[[VAL_24]], %[[VAL_24]]] : vector<1x1x4xi32>, tensor<1x1x4xi32> + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + transform.structured.vectorize %0 vector_sizes [1, 1, 4]{ vectorize_nd_extract } : !transform.any_op + transform.yield + } +} From 3027688a77b5511447b3f060aaecbf30e4b9e63e Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Tue, 6 Aug 2024 17:59:53 +0800 Subject: [PATCH 08/10] [RISCV] Support bf16 vmv.v.v and vmerge.vvm intrinsics with `zvfbfmin` (#101611) These two intrinsics are supported for f16 with `zvfhmin`, also support them in bf16 to make it aligned to f16. This resolve: https://github.com/riscv-non-isa/rvv-intrinsic-doc/issues/349 --- clang/include/clang/Basic/riscv_vector.td | 6 ++ .../non-policy/non-overloaded/vmerge.c | 61 +++++++++++++++++- .../non-policy/non-overloaded/vmv.c | 62 ++++++++++++++++++- .../non-policy/overloaded/vmerge.c | 62 ++++++++++++++++++- .../non-policy/overloaded/vmv.c | 62 ++++++++++++++++++- .../policy/non-overloaded/vmerge.c | 62 ++++++++++++++++++- .../policy/non-overloaded/vmv.c | 62 ++++++++++++++++++- .../policy/overloaded/vmerge.c | 62 ++++++++++++++++++- .../policy/overloaded/vmv.c | 62 ++++++++++++++++++- .../Target/RISCV/RISCVInstrInfoVPseudos.td | 8 +++ 10 files changed, 501 insertions(+), 8 deletions(-) diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td index 0cab4b8067f0d4..b838917e39cfe4 100644 --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -1378,6 +1378,9 @@ let HasMasked = false, let RequiredFeatures = ["Zvfhmin"] in defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "x", [["v", "v", "vv"]]>; + let RequiredFeatures = ["Zvfbfmin"] in + defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "y", + [["v", "v", "vv"]]>; let SupportOverloading = false in defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil", [["x", "v", "ve"], @@ -1890,6 +1893,9 @@ let HasMasked = false, let RequiredFeatures = ["Zvfhmin"] in defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "x", [["vvm", "v", "vvvm"]]>; + let RequiredFeatures = ["Zvfbfmin"] in + defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "y", + [["vvm", "v", "vvvm"]]>; defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd", [["vfm", "v", "vvem"]]>; } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c index fb41a07cccec02..d2eb01c70950e8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1037,3 +1037,62 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t return __riscv_vmerge_vvm_f64m8(op1, op2, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16mf4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16mf2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16m1(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, vbool8_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16m2(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) { + return 
__riscv_vmerge_vvm_bf16m4(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16m8(op1, op2, mask, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c index c25719a80d4fb5..7004c18fac0de0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1917,3 +1917,63 @@ vuint64m8_t test_vmv_s_x_u64m8(uint64_t src, size_t vl) { return __riscv_vmv_s_x_u64m8(src, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t src, size_t vl) { + return __riscv_vmv_v_v_bf16mf4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t src, size_t vl) { + return __riscv_vmv_v_v_bf16mf2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m1 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m1(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m8 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmv_v_v_bf16m8(vbfloat16m8_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m8(src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c index d67aa70fc8c8a0..2db27fdc512e5a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1037,3 +1037,63 @@ vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t return __riscv_vmerge(op1, op2, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return __riscv_vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return __riscv_vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) { + return __riscv_vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t op1, 
vbfloat16m2_t op2, vbool8_t mask, size_t vl) { + return __riscv_vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) { + return __riscv_vmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) { + return __riscv_vmerge(op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c index 44f34c4fe8101f..7b1fc33827e76b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1037,3 +1037,63 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { return __riscv_vmv_x(src); } +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t src, size_t vl) { + return __riscv_vmv_v(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t src, size_t vl) { + return __riscv_vmv_v(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m1 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t src, size_t vl) { + return __riscv_vmv_v(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.v.v.nxv8bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t src, size_t vl) { + return __riscv_vmv_v(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t src, size_t vl) { + return __riscv_vmv_v(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m8 +// CHECK-RV64-SAME: ( [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32bf16.i64( poison, [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmv_v_v_bf16m8(vbfloat16m8_t src, size_t vl) { + return __riscv_vmv_v(src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c index 4f723c579597e1..7667c375937bd0 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1037,3 +1037,63 @@ vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, return __riscv_vmerge_vvm_f64m8_tu(maskedoff, op1, op2, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16mf4_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16mf2_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16m1_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, vbool8_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16m2_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16m4_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) { + return __riscv_vmerge_vvm_bf16m8_tu(maskedoff, op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c index 2a5a0f4f892d3e..123aace607264f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmv.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1477,3 +1477,63 @@ vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl return __riscv_vmv_s_x_u64m8_tu(maskedoff, src, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t 
maskedoff, vbfloat16mf4_t src, size_t vl) { + return __riscv_vmv_v_v_bf16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, size_t vl) { + return __riscv_vmv_v_v_bf16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, size_t vl) { + return __riscv_vmv_v_v_bf16m8_tu(maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c index 8149be4cb2e71d..ba1838b3fc097f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1037,3 +1037,63 @@ vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, return 
__riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, vbool64_t mask, size_t vl) { + return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, vbool32_t mask, size_t vl) { + return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, vbool16_t mask, size_t vl) { + return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, vbool8_t mask, size_t vl) { + return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, vbool4_t mask, size_t vl) { + return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmerge_vvm_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, vbool2_t mask, size_t vl) { + return __riscv_vmerge_tu(maskedoff, 
op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c index ac95c77340202c..2f15b66a437896 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmv.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfhmin -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -1477,3 +1477,63 @@ vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl return __riscv_vmv_s_tu(maskedoff, src, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, size_t vl) { + return __riscv_vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, size_t vl) { + return __riscv_vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, size_t vl) { + return __riscv_vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, size_t vl) { + return __riscv_vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, size_t vl) { + return __riscv_vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmv_v_v_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32bf16.i64( [[MASKEDOFF]], [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, size_t vl) { + return __riscv_vmv_v_tu(maskedoff, src, vl); +} + diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index b5dbc055e03ddc..f78e404bbafd13 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -7245,6 +7245,14 @@ foreach vti = AllFloatVectors in { vti.RegClass, vti.ScalarRegClass>; } +foreach vti = AllBFloatVectors in + let Predicates = [HasVInstructionsBF16Minimal] in + defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM", + vti.Vector, + vti.Vector, vti.Vector, vti.Mask, + vti.Log2SEW, vti.LMul, vti.RegClass, + vti.RegClass, vti.RegClass>; + foreach fvti = AllFloatVectors in { defvar instr = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX); let Predicates = GetVTypePredicates.Predicates in From 6a59deafde742e30daf3bf886f98afc37f00d75b Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Tue, 6 Aug 2024 18:00:16 +0800 Subject: [PATCH 09/10] [RISCV] Support `vrgather` and `vcompress` for `zvfhmin` and `zvfbfmin` (#101633) Support these in both the C intrinsics and CodeGen so that they can work with the other intrinsics in `zvfhmin` or `zvfbfmin`. This resolves: https://github.com/riscv-non-isa/rvv-intrinsic-doc/issues/350 --- clang/include/clang/Basic/riscv_vector.td | 24 +- .../non-policy/non-overloaded/vcompress.c | 62 +- .../non-policy/non-overloaded/vrgather.c | 242 +++++- .../non-policy/overloaded/vcompress.c | 62 +- .../non-policy/overloaded/vrgather.c | 242 +++++- .../policy/non-overloaded/vcompress.c | 62 +- .../policy/non-overloaded/vrgather.c | 242 +++++- .../policy/overloaded/vcompress.c | 62 +- .../policy/overloaded/vrgather.c | 242 +++++- .../Target/RISCV/RISCVInstrInfoVPseudos.td | 15 +- llvm/test/CodeGen/RISCV/rvv/vcompress.ll | 137 ++- llvm/test/CodeGen/RISCV/rvv/vrgather.ll | 786 +++++++++++++++++- 12 files changed, 2161 insertions(+), 17 deletions(-) diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td index b838917e39cfe4..06ce0affced897 100644 --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -2262,10 +2262,22 @@ defm vfslide1down : RVVFloatingBinVFBuiltinSet; // 16.4.
Vector Register Gather Instructions // signed and floating type -defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd", +defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilfd", [["vv", "v", "vvUv"]]>; -defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfd", +defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilfd", [["vx", "v", "vvz"]]>; +let RequiredFeatures = ["Zvfhmin"] in { + defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "x", + [["vv", "v", "vvUv"]]>; + defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "x", + [["vx", "v", "vvz"]]>; +} +let RequiredFeatures = ["Zvfbfmin"] in { + defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "y", + [["vv", "v", "vvUv"]]>; + defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "y", + [["vx", "v", "vvz"]]>; +} defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd", [["vv", "v", "vv(Log2EEW:4)Uv"]]>; // unsigned type @@ -2288,8 +2300,14 @@ let HasMasked = false, IntrinsicTypes = {ResultType, Ops.back()->getType()}; }] in { // signed and floating type - defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd", + defm vcompress : RVVOutBuiltinSet<"vcompress", "csilfd", [["vm", "v", "vvm"]]>; + let RequiredFeatures = ["Zvfhmin"] in + defm vcompress : RVVOutBuiltinSet<"vcompress", "x", + [["vm", "v", "vvm"]]>; + let RequiredFeatures = ["Zvfbfmin"] in + defm vcompress : RVVOutBuiltinSet<"vcompress", "y", + [["vm", "v", "vvm"]]>; // unsigned type defm vcompress : RVVOutBuiltinSet<"vcompress", "csil", [["vm", "Uv", "UvUvm"]]>; diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c index 43ed94b70d0e05..31b7da69cc1930 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) { return __riscv_vcompress_vm_u64m8(src, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t src, vbool64_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16mf4(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t src, vbool32_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16mf2(src, mask, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vcompress_vm_bf16m1 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t src, vbool16_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m1(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t src, vbool8_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m2(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t src, vbool4_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m4(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m8 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t src, vbool2_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m8(src, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c index 5da76ddfb80048..bb73d62f878d18 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -2367,3 +2367,243 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t inde return __riscv_vrgather_vx_u64m8_m(mask, op1, index, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf4(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 
noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf4(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf2(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf2(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m1(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m1(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m2(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m2(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m4(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m4(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m8(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m8(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf4_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf4_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf2_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf2_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m1_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m1_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m2_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m2_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m4_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64( poison, [[OP1]], i64 [[INDEX]], 
[[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m4_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m8_m(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m8_m(mask, op1, index, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c index b2729034f9298e..bb030ee000c65b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) { return __riscv_vcompress(src, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t src, vbool64_t mask, size_t vl) { + return __riscv_vcompress(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t src, vbool32_t mask, size_t vl) { + return __riscv_vcompress(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m1 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vcompress.nxv4bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t src, vbool16_t mask, size_t vl) { + return __riscv_vcompress(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m2 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t src, vbool8_t mask, size_t vl) { + return __riscv_vcompress(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m4 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t src, vbool4_t mask, size_t vl) { + return __riscv_vcompress(src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m8 +// CHECK-RV64-SAME: ( [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32bf16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t src, vbool2_t mask, size_t vl) { + return __riscv_vcompress(src, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c index 0ea6382a6614b2..c6f9cb4c3f335b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -2367,3 +2367,243 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t inde return __riscv_vrgather(mask, op1, index, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vrgather_vx_bf16mf4(vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4 
+// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32bf16.i64( poison, [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8 +// CHECK-RV64-SAME: ( [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32bf16.i64( poison, [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather(op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather(mask, 
op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.mask.nxv32bf16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather(mask, op1, index, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c index a39e76c2474e3f..97440d62f28d71 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, v return __riscv_vcompress_vm_u64m8_tu(maskedoff, src, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, vbool64_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16mf4_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, vbool32_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16mf2_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, vbool16_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m1_tu(maskedoff, 
src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, vbool8_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m2_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, vbool4_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m4_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, vbool2_t mask, size_t vl) { + return __riscv_vcompress_vm_bf16m8_tu(maskedoff, src, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c index dec79b77e9bfa9..3c4316de9e8b8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -4727,3 +4727,243 @@ vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin return __riscv_vrgather_vx_u64m8_mu(mask, maskedoff, op1, index, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf4_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vrgather.vx.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf4_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf2_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf2_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m1_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m1_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m2_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vrgather_vx_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m2_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m4_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m4_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m8_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m8_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf4_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, 
vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf4_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16mf2_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16mf2_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m1_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m1_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m2_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64( 
[[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m2_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m4_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m4_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_vv_bf16m8_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_bf16m8_mu(mask, maskedoff, op1, index, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c index 0de71a7f8eb74a..e1525e5c67878a 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin 
-disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, v return __riscv_vcompress_tu(maskedoff, src, mask, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, vbool64_t mask, size_t vl) { + return __riscv_vcompress_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, vbool32_t mask, size_t vl) { + return __riscv_vcompress_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, vbool16_t mask, size_t vl) { + return __riscv_vcompress_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, vbool8_t mask, size_t vl) { + return __riscv_vcompress_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, vbool4_t mask, size_t vl) { + return __riscv_vcompress_tu(maskedoff, src, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vcompress_vm_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32bf16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, vbool2_t mask, size_t vl) { + return __riscv_vcompress_tu(maskedoff, src, mask, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c index f3ba55c2c31c8d..a094e8401b4aea 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c @@ -1,7 +1,7 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -4727,3 +4727,243 @@ vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); } +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4bf16.i64( [[MASKEDOFF]], 
[[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_tu +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_tu(maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16mf2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, 
size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m1_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m2_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m4_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_bf16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vrgather_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_bf16m8_mu +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +} + diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index f78e404bbafd13..86b30e836473dd 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -371,6 +371,9 @@ defset list AllVectors = { } } +defvar AllFloatVectorsExceptFP16 = !filter(vti, AllFloatVectors, !ne(vti.Scalar, f16)); +defvar AllFP16Vectors = !filter(vti, AllFloatVectors, !eq(vti.Scalar, f16)); + // This functor is used to obtain the int vector type that has the same SEW and // multiplier as the input parameter type class GetIntVTypeInfo { @@ -7453,14 +7456,22 @@ defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", eew=16, vtilist=AllIntegerVectors>; defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", - AllFloatVectors, uimm5>; + AllFloatVectorsExceptFP16, uimm5>; +let Predicates = [HasVInstructionsF16Minimal] in + defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", + AllFP16Vectors, uimm5>; +defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", + AllBFloatVectors, uimm5>; defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", eew=16, vtilist=AllFloatVectors>; //===----------------------------------------------------------------------===// // 16.5. 
Vector Compress Instruction //===----------------------------------------------------------------------===// defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>; -defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>; +defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectorsExceptFP16>; +let Predicates = [HasVInstructionsF16Minimal] in + defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFP16Vectors>; +defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBFloatVectors>; // Include the non-intrinsic ISel patterns include "RISCVInstrInfoVVLPatterns.td" diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll index 85663f08db6a09..b763e116a9f62e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \ +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \ +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vcompress.nxv1i8( @@ -817,3 +817,136 @@ entry: ret %a } + +declare @llvm.riscv.vcompress.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vcompress_vm_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma +; CHECK-NEXT: vcompress.vm v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcompress.nxv1bf16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vcompress.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vcompress_vm_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma +; CHECK-NEXT: vcompress.vm v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcompress.nxv2bf16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vcompress.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vcompress_vm_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vcompress.vm v8, v9, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcompress.nxv4bf16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vcompress.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vcompress_vm_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma +; CHECK-NEXT: vcompress.vm v8, v10, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcompress.nxv8bf16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vcompress.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vcompress_vm_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma +; CHECK-NEXT: vcompress.vm v8, v12, v0 +; CHECK-NEXT: ret +entry: 
+ %a = call @llvm.riscv.vcompress.nxv16bf16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vcompress.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vcompress_vm_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcompress_vm_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma +; CHECK-NEXT: vcompress.vm v8, v16, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcompress.nxv32bf16( + %0, + %1, + %2, + iXLen %3) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll index d11e172b25037a..5d700e683a9698 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \ +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s -; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \ +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \ ; RUN: -verify-machineinstrs | FileCheck %s declare @llvm.riscv.vrgather.vv.nxv1i8.iXLen( @@ -4820,3 +4820,785 @@ entry: ret %a } + +declare @llvm.riscv.vrgather.vv.nxv1bf16.iXLen( + , + , + , + iXLen) + +define @intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrgather.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv1bf16.iXLen( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.nxv2bf16.iXLen( + , + , + , + iXLen) + +define @intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrgather.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv2bf16.iXLen( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.nxv4bf16.iXLen( + , + , + , + iXLen) + +define @intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16( %0, 
%1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrgather.vv v10, v8, v9 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv4bf16.iXLen( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.nxv8bf16.iXLen( + , + , + , + iXLen) + +define @intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrgather.vv v12, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv8bf16.iXLen( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vrgather.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.nxv16bf16.iXLen( + , + , + , + iXLen) + +define @intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrgather.vv v16, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv16bf16.iXLen( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vrgather.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.nxv32bf16.iXLen( + , + , + , + iXLen) + +define @intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrgather.vv v24, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.nxv32bf16.iXLen( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen( + , + , + , + , + iXLen, + 
iXLen) + +define @intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv1bf16.iXLen( + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_vx_nxv1bf16_nxv1bf16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vrgather.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv1bf16.iXLen( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen( + , + , + iXLen, + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv2bf16.iXLen( + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_vx_nxv2bf16_nxv2bf16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vrgather.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv2bf16.iXLen( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen( + , + , + iXLen, + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv2bf1bf16XLen( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv4bf16.iXLen( + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_vx_nxv4bf16_nxv4bf16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vrgather.vx v9, v8, a0 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv4bf16.iXLen( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen( + , + , + iXLen, + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vrgather.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + 
+ ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv8bf16.iXLen( + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_vx_nxv8bf16_nxv8bf16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vrgather.vx v10, v8, a0 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv8bf16.iXLen( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen( + , + , + iXLen, + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vrgather.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv16bf16.iXLen( + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_vx_nxv16bf16_nxv16bf16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vrgather.vx v12, v8, a0 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv16bf16.iXLen( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen( + , + , + iXLen, + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vrgather.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.nxv32bf16.iXLen( + , + , + iXLen, + iXLen) + +define @intrinsic_vrgather_vx_nxv32bf16_nxv32bf16( %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vx_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vrgather.vx v16, v8, a0 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv32bf16.iXLen( + undef, + %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen( + , + , + iXLen, + , + iXLen, + iXLen) + +define @intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16( %0, %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vrgather.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen( + %0, + %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrgather.vi v9, v8, 9 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv1bf16.iXLen( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define 
@intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen( + %0, + %1, + iXLen 9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrgather.vi v9, v8, 9 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv2bf16.iXLen( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen( + %0, + %1, + iXLen 9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrgather.vi v9, v8, 9 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv4bf16.iXLen( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vrgather.vi v8, v9, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen( + %0, + %1, + iXLen 9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrgather.vi v10, v8, 9 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv8bf16.iXLen( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vrgather.vi v8, v10, 9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen( + %0, + %1, + iXLen 9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrgather_vi_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrgather_vi_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrgather.vi v12, v8, 9 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrgather.vx.nxv16bf16.iXLen( + undef, + %0, + iXLen 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; 
CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen 9,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vrgather.vi v16, v8, 9
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    iXLen 9,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+

From 40c2aaf54e9a7b5c560bb68796d444180ad67b5d Mon Sep 17 00:00:00 2001
From: Brandon Wu
Date: Tue, 6 Aug 2024 18:00:38 +0800
Subject: [PATCH 10/10] [RISCV][sema] Correct the requirement of
 `vf[n|w]cvt.x[|u].f` intrinsics (#101811)

Fix https://github.com/llvm/llvm-project/issues/101526

`vf[n|w]cvt.x[|u].f` for f16 needs `zvfh` instead of `zvfhmin`; the
current approach is not able to detect this. Ultimately we need to add
`zvfh` to RequiredFeatures and check the other intrinsics that way
instead; the type check should be done in checkRVVTypeSupport.
---
 clang/include/clang/Basic/riscv_vector.td      | 48 ++++++++++++++++---
 .../clang/Basic/riscv_vector_common.td         |  4 +-
 .../clang/Support/RISCVVIntrinsicUtils.h       |  3 +-
 clang/lib/Sema/SemaRISCV.cpp                   |  6 +++
 clang/utils/TableGen/RISCVVEmitter.cpp         |  1 +
 5 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 06ce0affced897..dda2dcb9f4ff65 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1918,8 +1918,18 @@ def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;
 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
   def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
   def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
-  def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
-  def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">;
+  def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "si", "vfwcvt_f">;
+  def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "si", "vfwcvt_f">;
+  let RequiredFeatures = ["Zvfh"] in {
+    let Name = "vfwcvt_f_xu_v",
+        IRName = "vfwcvt_f_xu_v",
+        MaskedIRName = "vfwcvt_f_xu_v_mask" in
+      def : RVVConvBuiltin<"Fw", "FwUv", "c", "vfwcvt_f">;
+    let Name = "vfwcvt_f_x_v",
+        IRName = "vfwcvt_f_x_v",
+        MaskedIRName = "vfwcvt_f_x_v_mask" in
+      def : RVVConvBuiltin<"Fw", "Fwv", "c", "vfwcvt_f">;
+  }
   def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "f", "vfwcvt_f">;
   let RequiredFeatures = ["Zvfhmin"] in
     def vfwcvt_f_f_v_fp16 : RVVConvBuiltin<"w", "wv", "x", "vfwcvt_f"> {
@@ -1933,6 +1943,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
   def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
   def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
+  let RequiredFeatures = ["Zvfh"] in {
+    let Name = "vfncvt_rtz_xu_f_w",
+        IRName = "vfncvt_rtz_xu_f_w",
+        MaskedIRName = "vfncvt_rtz_xu_f_w_mask" in
"vfncvt_rtz_xu_f_w_mask" in + def : RVVConvBuiltin<"Uv", "UvFw", "c", "vfncvt_rtz_xu">; + let Name = "vfncvt_rtz_x_f_w", + IRName = "vfncvt_rtz_x_f_w", + MaskedIRName = "vfncvt_rtz_x_f_w_mask" in + def : RVVConvBuiltin<"Iv", "IvFw", "c", "vfncvt_rtz_x">; + } def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">; } @@ -2011,10 +2031,18 @@ let ManualCodegen = [{ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { let OverloadedName = "vfncvt_x" in defm : - RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFwu"]]>; + RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>; let OverloadedName = "vfncvt_xu" in defm : - RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFwu"]]>; + RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>; + let RequiredFeatures = ["Zvfh"] in { + let OverloadedName = "vfncvt_x" in + defm : + RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>; + let OverloadedName = "vfncvt_xu" in + defm : + RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>; + } let OverloadedName = "vfncvt_f" in { defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "xf", [["v", "vIwu"]]>; @@ -2061,10 +2089,18 @@ let ManualCodegen = [{ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in { let OverloadedName = "vfncvt_x" in defm : - RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFw"]]>; + RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>; let OverloadedName = "vfncvt_xu" in defm : - RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFw"]]>; + RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>; + let RequiredFeatures = ["Zvfh"] in { + let OverloadedName = "vfncvt_x" in + defm : + RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>; + let OverloadedName = "vfncvt_xu" in + defm : + RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>; + } let OverloadedName = "vfncvt_f" in { defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "xf", [["v", "vIw"]]>; diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td index 040db6f0cdbfb0..33f6441217a5e1 100644 --- a/clang/include/clang/Basic/riscv_vector_common.td +++ b/clang/include/clang/Basic/riscv_vector_common.td @@ -604,10 +604,10 @@ class RVVConvToWidenUnsignedBuiltin : RVVConvBuiltin<"Uw", "Uwv", "xf", overloaded_name>; class RVVConvToNarrowingSignedBuiltin - : RVVConvBuiltin<"Iv", "IvFw", "csi", overloaded_name>; + : RVVConvBuiltin<"Iv", "IvFw", "si", overloaded_name>; class RVVConvToNarrowingUnsignedBuiltin - : RVVConvBuiltin<"Uv", "UvFw", "csi", overloaded_name>; + : RVVConvBuiltin<"Uv", "UvFw", "si", overloaded_name>; let HasMaskedOffOperand = true in { multiclass RVVSignedReductionBuiltin { diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h index b4ff61784126e2..9a6a2092eb9961 100644 --- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h +++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h @@ -502,7 +502,8 @@ enum RVVRequire : uint32_t { RVV_REQ_Zvksh = 1 << 15, RVV_REQ_Zvfbfwma = 1 << 16, RVV_REQ_Zvfbfmin = 1 << 17, - RVV_REQ_Experimental = 1 << 18, + RVV_REQ_Zvfh = 1 << 18, + RVV_REQ_Experimental = 1 << 19, LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Experimental) }; diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp index f1c7c0516e6719..abf8e4ac2f3e8a 100644 --- a/clang/lib/Sema/SemaRISCV.cpp +++ b/clang/lib/Sema/SemaRISCV.cpp @@ -222,6 +222,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics( {"zvksh", RVV_REQ_Zvksh}, {"zvfbfwma", RVV_REQ_Zvfbfwma}, {"zvfbfmin", 
+      {"zvfh", RVV_REQ_Zvfh},
       {"experimental", RVV_REQ_Experimental}};
 
   // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
@@ -280,6 +281,11 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
     if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
       continue;
 
+    // TODO: Remove the check below and use RequiredFeatures in
+    // riscv_vector.td to check the intrinsics instead; the type check should
+    // be done in checkRVVTypeSupport. This check is also not able to work on
+    // the intrinsics that have Float16 but whose BaseType is not Float16,
+    // such as `vfcvt_f_x_v`.
     if (BaseType == BasicType::Float16) {
       if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
         if (!TI.hasFeature("zvfhmin"))
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 7f3cb70c97d09b..ef7159fae9fd28 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -670,6 +670,7 @@ void RVVEmitter::createRVVIntrinsics(
           .Case("Zvksh", RVV_REQ_Zvksh)
           .Case("Zvfbfwma", RVV_REQ_Zvfbfwma)
           .Case("Zvfbfmin", RVV_REQ_Zvfbfmin)
+          .Case("Zvfh", RVV_REQ_Zvfh)
           .Case("Experimental", RVV_REQ_Experimental)
           .Default(RVV_REQ_None);
       assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");