
[AMDGPU] Qualify auto. NFC. #110878

Merged: jayfoad merged 2 commits into llvm:main from qualify-auto on Oct 3, 2024

Conversation

jayfoad (Contributor) commented on Oct 2, 2024

Generated automatically with:
$ clang-tidy -fix -checks=-*,llvm-qualified-auto $(find lib/Target/AMDGPU/ -type f)

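For context, llvm-qualified-auto is clang-tidy's LLVM-flavored alias of readability-qualified-auto: wherever a plain auto deduces to a pointer type, the fix-it spells the pointer out as auto * (or const auto * when the deduced type is already a pointer-to-const), so pointer-ness stays visible at the declaration. A minimal sketch of the transformation, assuming the usual LLVM headers; the function sketch() and its body are illustrative, not taken from the patch:

#include "llvm/IR/Instructions.h"
using namespace llvm;

void sketch(Instruction *I) {
  // Before the fix-it:  auto CI = dyn_cast<CallInst>(I)
  // dyn_cast<CallInst>(I) yields CallInst *, so the check rewrites the
  // declaration to make the pointer explicit:
  if (auto *CI = dyn_cast<CallInst>(I))
    (void)CI; // the pointee is mutable here, so no const is added

  // When the initializer returns a pointer-to-const, the existing
  // constness is spelled out as well, as in the hunks below, e.g.
  //   auto TRI = ST.getRegisterInfo();
  // becomes
  //   const auto *TRI = ST.getRegisterInfo();
}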
llvmbot (Member) commented on Oct 2, 2024

@llvm/pr-subscribers-backend-amdgpu

Author: Jay Foad (jayfoad)

Changes

Generated automatically with:
$ clang-tidy -fix -checks=-*,llvm-qualified-auto $(find lib/Target/AMDGPU/ -type f)


Patch is 96.40 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/110878.diff

50 Files Affected:

  • (modified) llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp (+4-4)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp (+4-4)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp (+17-17)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp (+21-21)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (+5-5)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp (+6-5)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp (+4-4)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.h (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp (+15-15)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUPrintfRuntimeBinding.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp (+3-2)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (+3-4)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/AMDGPUUnifyMetadata.cpp (+4-4)
  • (modified) llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp (+5-5)
  • (modified) llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp (+18-18)
  • (modified) llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/GCNILPSched.cpp (+5-5)
  • (modified) llvm/lib/Target/AMDGPU/GCNIterativeScheduler.cpp (+6-6)
  • (modified) llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp (+8-8)
  • (modified) llvm/lib/Target/AMDGPU/GCNNSAReassign.cpp (+8-6)
  • (modified) llvm/lib/Target/AMDGPU/GCNRegPressure.cpp (+3-2)
  • (modified) llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCExpr.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCKernelDescriptor.cpp (+4-4)
  • (modified) llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp (+7-7)
  • (modified) llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp (+8-8)
  • (modified) llvm/lib/Target/AMDGPU/SIFrameLowering.cpp (+6-6)
  • (modified) llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+26-26)
  • (modified) llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+8-8)
  • (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.h (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/SILateBranchLowering.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h (+10-7)
  • (modified) llvm/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp (+1-1)
  • (modified) llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp (+2-2)
  • (modified) llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp (+14-14)
  • (modified) llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp (+4-4)
  • (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp (+4-4)
  • (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUPALMetadata.cpp (+10-10)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
index 4c8ddbd9aabd5a..a6a8597f22a4a2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
@@ -235,7 +235,7 @@ void getInterestingMemoryOperands(
     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                              XCHG->getCompareOperand()->getType(),
                              std::nullopt);
-  } else if (auto CI = dyn_cast<CallInst>(I)) {
+  } else if (auto *CI = dyn_cast<CallInst>(I)) {
     switch (CI->getIntrinsicID()) {
     case Intrinsic::masked_load:
     case Intrinsic::masked_store:
@@ -257,7 +257,7 @@ void getInterestingMemoryOperands(
     case Intrinsic::masked_compressstore: {
       bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
       unsigned OpOffset = IsWrite ? 1 : 0;
-      auto BasePtr = CI->getOperand(OpOffset);
+      auto *BasePtr = CI->getOperand(OpOffset);
       MaybeAlign Alignment = BasePtr->getPointerAlignment(DL);
       Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
       IRBuilder<> IB(I);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 974f1c80d95530..93b6ba0595b70f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -332,7 +332,7 @@ void AMDGPUAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {
 
     emitVisibility(GVSym, GV->getVisibility(), !GV->isDeclaration());
     emitLinkage(GV, GVSym);
-    auto TS = getTargetStreamer();
+    auto *TS = getTargetStreamer();
     TS->emitAMDGPULDS(GVSym, Size, Alignment);
     return;
   }
@@ -1238,8 +1238,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
     // return ((Dst & ~Mask) | (Value << Shift))
     auto SetBits = [&Ctx](const MCExpr *Dst, const MCExpr *Value, uint32_t Mask,
                           uint32_t Shift) {
-      auto Shft = MCConstantExpr::create(Shift, Ctx);
-      auto Msk = MCConstantExpr::create(Mask, Ctx);
+      const auto *Shft = MCConstantExpr::create(Shift, Ctx);
+      const auto *Msk = MCConstantExpr::create(Mask, Ctx);
       Dst = MCBinaryExpr::createAnd(Dst, MCUnaryExpr::createNot(Msk, Ctx), Ctx);
       Dst = MCBinaryExpr::createOr(
           Dst, MCBinaryExpr::createShl(Value, Shft, Ctx), Ctx);
@@ -1414,7 +1414,7 @@ void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF,
        const SIProgramInfo &CurrentProgramInfo) {
   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
   auto CC = MF.getFunction().getCallingConv();
-  auto MD = getTargetStreamer()->getPALMetadata();
+  auto *MD = getTargetStreamer()->getPALMetadata();
   auto &Ctx = MF.getContext();
 
   MD->setEntryPoint(CC, MF.getFunction().getName());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 25e36dc4b3691f..9807e31c1c7f83 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -142,7 +142,7 @@ struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
                             const CCValAssign &VA) override {
     MachineFunction &MF = MIRBuilder.getMF();
 
-    auto MMO = MF.getMachineMemOperand(
+    auto *MMO = MF.getMachineMemOperand(
         MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
         inferAlignFromPtrInfo(MF, MPO));
     MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
@@ -244,7 +244,7 @@ struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
     uint64_t LocMemOffset = VA.getLocMemOffset();
     const auto &ST = MF.getSubtarget<GCNSubtarget>();
 
-    auto MMO = MF.getMachineMemOperand(
+    auto *MMO = MF.getMachineMemOperand(
         MPO, MachineMemOperand::MOStore, MemTy,
         commonAlignment(ST.getStackAlignment(), LocMemOffset));
     MIRBuilder.buildStore(ValVReg, Addr, *MMO);
@@ -1007,7 +1007,7 @@ bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
 
   // Make sure that the caller and callee preserve all of the same registers.
-  auto TRI = ST.getRegisterInfo();
+  const auto *TRI = ST.getRegisterInfo();
 
   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
   const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
@@ -1219,7 +1219,7 @@ bool AMDGPUCallLowering::lowerTailCall(
     if (!ExecArg.Ty->isIntegerTy(ST.getWavefrontSize()))
       return false;
 
-    if (auto CI = dyn_cast<ConstantInt>(ExecArg.OrigValue)) {
+    if (const auto *CI = dyn_cast<ConstantInt>(ExecArg.OrigValue)) {
       MIB.addImm(CI->getSExtValue());
     } else {
       MIB.addReg(ExecArg.Regs[0]);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index 26116bfa3c2feb..b67d78e450bb82 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -163,8 +163,8 @@ std::string MetadataStreamerMsgPackV4::getTypeName(Type *Ty,
   case Type::DoubleTyID:
     return "double";
   case Type::FixedVectorTyID: {
-    auto VecTy = cast<FixedVectorType>(Ty);
-    auto ElTy = VecTy->getElementType();
+    auto *VecTy = cast<FixedVectorType>(Ty);
+    auto *ElTy = VecTy->getElementType();
     auto NumElements = VecTy->getNumElements();
     return (Twine(getTypeName(ElTy, Signed)) + Twine(NumElements)).str();
   }
@@ -199,7 +199,7 @@ void MetadataStreamerMsgPackV4::emitTargetID(
 }
 
 void MetadataStreamerMsgPackV4::emitPrintf(const Module &Mod) {
-  auto Node = Mod.getNamedMetadata("llvm.printf.fmts");
+  auto *Node = Mod.getNamedMetadata("llvm.printf.fmts");
   if (!Node)
     return;
 
@@ -214,10 +214,10 @@ void MetadataStreamerMsgPackV4::emitPrintf(const Module &Mod) {
 void MetadataStreamerMsgPackV4::emitKernelLanguage(const Function &Func,
                                                    msgpack::MapDocNode Kern) {
   // TODO: What about other languages?
-  auto Node = Func.getParent()->getNamedMetadata("opencl.ocl.version");
+  auto *Node = Func.getParent()->getNamedMetadata("opencl.ocl.version");
   if (!Node || !Node->getNumOperands())
     return;
-  auto Op0 = Node->getOperand(0);
+  auto *Op0 = Node->getOperand(0);
   if (Op0->getNumOperands() <= 1)
     return;
 
@@ -233,11 +233,11 @@ void MetadataStreamerMsgPackV4::emitKernelLanguage(const Function &Func,
 void MetadataStreamerMsgPackV4::emitKernelAttrs(const Function &Func,
                                                 msgpack::MapDocNode Kern) {
 
-  if (auto Node = Func.getMetadata("reqd_work_group_size"))
+  if (auto *Node = Func.getMetadata("reqd_work_group_size"))
     Kern[".reqd_workgroup_size"] = getWorkGroupDimensions(Node);
-  if (auto Node = Func.getMetadata("work_group_size_hint"))
+  if (auto *Node = Func.getMetadata("work_group_size_hint"))
     Kern[".workgroup_size_hint"] = getWorkGroupDimensions(Node);
-  if (auto Node = Func.getMetadata("vec_type_hint")) {
+  if (auto *Node = Func.getMetadata("vec_type_hint")) {
     Kern[".vec_type_hint"] = Kern.getDocument()->getNode(
         getTypeName(
             cast<ValueAsMetadata>(Node->getOperand(0))->getType(),
@@ -271,7 +271,7 @@ void MetadataStreamerMsgPackV4::emitKernelArgs(const MachineFunction &MF,
 void MetadataStreamerMsgPackV4::emitKernelArg(const Argument &Arg,
                                               unsigned &Offset,
                                               msgpack::ArrayDocNode Args) {
-  auto Func = Arg.getParent();
+  const auto *Func = Arg.getParent();
   auto ArgNo = Arg.getArgNo();
   const MDNode *Node;
 
@@ -317,7 +317,7 @@ void MetadataStreamerMsgPackV4::emitKernelArg(const Argument &Arg,
   Type *Ty = Arg.hasByRefAttr() ? Arg.getParamByRefType() : Arg.getType();
 
   // FIXME: Need to distinguish in memory alignment from pointer alignment.
-  if (auto PtrTy = dyn_cast<PointerType>(Ty)) {
+  if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
     if (PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
       PointeeAlign = Arg.getParamAlign().valueOrOne();
   }
@@ -353,7 +353,7 @@ void MetadataStreamerMsgPackV4::emitKernelArg(
   if (PointeeAlign)
     Arg[".pointee_align"] = Arg.getDocument()->getNode(PointeeAlign->value());
 
-  if (auto PtrTy = dyn_cast<PointerType>(Ty))
+  if (auto *PtrTy = dyn_cast<PointerType>(Ty))
     if (auto Qualifier = getAddressSpaceQualifier(PtrTy->getAddressSpace()))
       // Limiting address space to emit only for a certain ValueKind.
       if (ValueKind == "global_buffer" || ValueKind == "dynamic_shared_pointer")
@@ -393,7 +393,7 @@ void MetadataStreamerMsgPackV4::emitHiddenKernelArgs(
 
   const Module *M = Func.getParent();
   auto &DL = M->getDataLayout();
-  auto Int64Ty = Type::getInt64Ty(Func.getContext());
+  auto *Int64Ty = Type::getInt64Ty(Func.getContext());
 
   Offset = alignTo(Offset, ST.getAlignmentForImplicitArgPtr());
 
@@ -407,7 +407,7 @@ void MetadataStreamerMsgPackV4::emitHiddenKernelArgs(
     emitKernelArg(DL, Int64Ty, Align(8), "hidden_global_offset_z", Offset,
                   Args);
 
-  auto Int8PtrTy =
+  auto *Int8PtrTy =
       PointerType::get(Func.getContext(), AMDGPUAS::GLOBAL_ADDRESS);
 
   if (HiddenArgNumBytes >= 32) {
@@ -592,9 +592,9 @@ void MetadataStreamerMsgPackV5::emitHiddenKernelArgs(
   auto &DL = M->getDataLayout();
   const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
 
-  auto Int64Ty = Type::getInt64Ty(Func.getContext());
-  auto Int32Ty = Type::getInt32Ty(Func.getContext());
-  auto Int16Ty = Type::getInt16Ty(Func.getContext());
+  auto *Int64Ty = Type::getInt64Ty(Func.getContext());
+  auto *Int32Ty = Type::getInt32Ty(Func.getContext());
+  auto *Int16Ty = Type::getInt16Ty(Func.getContext());
 
   Offset = alignTo(Offset, ST.getAlignmentForImplicitArgPtr());
   emitKernelArg(DL, Int32Ty, Align(4), "hidden_block_count_x", Offset, Args);
@@ -621,7 +621,7 @@ void MetadataStreamerMsgPackV5::emitHiddenKernelArgs(
   emitKernelArg(DL, Int16Ty, Align(2), "hidden_grid_dims", Offset, Args);
 
   Offset += 6; // Reserved.
-  auto Int8PtrTy =
+  auto *Int8PtrTy =
       PointerType::get(Func.getContext(), AMDGPUAS::GLOBAL_ADDRESS);
 
   if (M->getNamedMetadata("llvm.printf.fmts")) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index aaad2a56de2cd0..a367db70627748 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -394,7 +394,7 @@ void PipelineSolver::reset() {
     for (auto &SG : SyncPipeline) {
       SmallVector<SUnit *, 32> TempCollection = SG.Collection;
       SG.Collection.clear();
-      auto SchedBarr = llvm::find_if(TempCollection, [](SUnit *SU) {
+      auto *SchedBarr = llvm::find_if(TempCollection, [](SUnit *SU) {
         return SU->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER;
       });
       if (SchedBarr != TempCollection.end())
@@ -421,7 +421,7 @@ void PipelineSolver::convertSyncMapsToArrays() {
             std::pair(SUsToCandSGs.first, SUsToCandSGs.second));
         continue;
       }
-      auto SortPosition = PipelineInstrs[PipelineIDx].begin();
+      auto *SortPosition = PipelineInstrs[PipelineIDx].begin();
       // Insert them in sorted order -- this allows for good parsing order in
       // the greedy algorithm
       while (SortPosition != PipelineInstrs[PipelineIDx].end() &&
@@ -515,7 +515,7 @@ void PipelineSolver::removeEdges(
     SUnit *Pred = PredSuccPair.first;
     SUnit *Succ = PredSuccPair.second;
 
-    auto Match = llvm::find_if(
+    auto *Match = llvm::find_if(
         Succ->Preds, [&Pred](SDep &P) { return P.getSUnit() == Pred; });
     if (Match != Succ->Preds.end()) {
       assert(Match->isArtificial());
@@ -639,8 +639,8 @@ bool PipelineSolver::solveExact() {
              : populateReadyList(ReadyList, CurrSU.second.begin(),
                                  CurrSU.second.end());
 
-  auto I = ReadyList.begin();
-  auto E = ReadyList.end();
+  auto *I = ReadyList.begin();
+  auto *E = ReadyList.end();
   for (; I != E; ++I) {
     // If we are trying SGs in least cost order, and the current SG is cost
     // infeasible, then all subsequent SGs will also be cost infeasible, so we
@@ -942,7 +942,7 @@ class MFMAExpInterleaveOpt final : public IGLPStrategy {
     bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
                SmallVectorImpl<SchedGroup> &SyncPipe) override {
 
-      auto DAG = SyncPipe[0].DAG;
+      auto *DAG = SyncPipe[0].DAG;
 
       if (Cache->empty()) {
         auto I = DAG->SUnits.rbegin();
@@ -976,7 +976,7 @@ class MFMAExpInterleaveOpt final : public IGLPStrategy {
                SmallVectorImpl<SchedGroup> &SyncPipe) override {
       bool FoundTrans = false;
       unsigned Counter = 1;
-      auto DAG = SyncPipe[0].DAG;
+      auto *DAG = SyncPipe[0].DAG;
 
       if (Cache->empty()) {
         SmallVector<SUnit *, 8> Worklist;
@@ -1016,13 +1016,13 @@ class MFMAExpInterleaveOpt final : public IGLPStrategy {
   public:
     bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
                SmallVectorImpl<SchedGroup> &SyncPipe) override {
-      auto DAG = SyncPipe[0].DAG;
+      auto *DAG = SyncPipe[0].DAG;
 
       if (!SU || !TII->isMFMAorWMMA(*ChainSeed->getInstr()))
         return false;
 
       if (Cache->empty()) {
-        auto TempSU = ChainSeed;
+        auto *TempSU = ChainSeed;
         auto Depth = Number;
         while (Depth > 0) {
           --Depth;
@@ -1232,7 +1232,7 @@ class MFMAExpInterleaveOpt final : public IGLPStrategy {
       if (!OtherGroup->Collection.size())
         return true;
 
-      auto DAG = SyncPipe[0].DAG;
+      auto *DAG = SyncPipe[0].DAG;
 
       for (auto &OtherEle : OtherGroup->Collection)
         if (DAG->IsReachable(const_cast<SUnit *>(SU), OtherEle))
@@ -1275,7 +1275,7 @@ class MFMAExpInterleaveOpt final : public IGLPStrategy {
         return false;
 
       if (Cache->empty()) {
-        auto TempSU = ChainSeed;
+        auto *TempSU = ChainSeed;
         auto Depth = Number;
         while (Depth > 0) {
           --Depth;
@@ -1315,7 +1315,7 @@ class MFMAExpInterleaveOpt final : public IGLPStrategy {
                SmallVectorImpl<SchedGroup> &SyncPipe) override {
 
       SmallVector<SUnit *, 12> Worklist;
-      auto DAG = SyncPipe[0].DAG;
+      auto *DAG = SyncPipe[0].DAG;
       if (Cache->empty()) {
         for (auto &SU : DAG->SUnits)
           if (TII->isTRANS(SU.getInstr()->getOpcode())) {
@@ -1509,7 +1509,7 @@ bool MFMAExpInterleaveOpt::analyzeDAG(const SIInstrInfo *TII) {
                       return isBitPack(Opc);
                     });
 
-  auto PackPred =
+  auto *PackPred =
       std::find_if((*TempMFMA)->Preds.begin(), (*TempMFMA)->Preds.end(),
                    [&isBitPack](SDep &Pred) {
                      auto Opc = Pred.getSUnit()->getInstr()->getOpcode();
@@ -1868,7 +1868,7 @@ class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
       }
 
       assert(Cache->size());
-      auto DAG = SyncPipe[0].DAG;
+      auto *DAG = SyncPipe[0].DAG;
       for (auto &Elt : *Cache) {
         if (DAG->IsReachable(Elt, const_cast<SUnit *>(SU)))
           return true;
@@ -1886,7 +1886,7 @@ class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
   public:
     bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
                SmallVectorImpl<SchedGroup> &SyncPipe) override {
-      auto MI = SU->getInstr();
+      auto *MI = SU->getInstr();
       if (MI->getOpcode() != AMDGPU::V_PERM_B32_e64)
         return false;
 
@@ -1952,7 +1952,7 @@ class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
   public:
     bool apply(const SUnit *SU, const ArrayRef<SUnit *> Collection,
                SmallVectorImpl<SchedGroup> &SyncPipe) override {
-      auto MI = SU->getInstr();
+      auto *MI = SU->getInstr();
       if (MI->getOpcode() == TargetOpcode::BUNDLE)
         return false;
       if (!Collection.size())
@@ -2023,7 +2023,7 @@ class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
           return false;
       }
 
-      auto DAG = SyncPipe[0].DAG;
+      auto *DAG = SyncPipe[0].DAG;
       // Does the previous DS_WRITE share a V_PERM predecessor with this
       // VMEM_READ
       return llvm::any_of(*Cache, [&SU, &DAG](SUnit *Elt) {
@@ -2070,7 +2070,7 @@ bool MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(
          "DSWCounters should be zero in pre-RA scheduling!");
   SmallVector<SUnit *, 6> DSWithPerms;
   for (auto &SU : DAG->SUnits) {
-    auto I = SU.getInstr();
+    auto *I = SU.getInstr();
     if (TII->isMFMAorWMMA(*I))
       ++MFMACount;
     else if (TII->isDS(*I)) {
@@ -2091,8 +2091,8 @@ bool MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(
 
   if (IsInitial) {
     DSWWithPermCount = DSWithPerms.size();
-    auto I = DSWithPerms.begin();
-    auto E = DSWithPerms.end();
+    auto *I = DSWithPerms.begin();
+    auto *E = DSWithPerms.end();
 
     // Get the count of DS_WRITES with V_PERM predecessors which
     // have loop carried dependencies (WAR) on the same VMEM_READs.
@@ -2113,7 +2113,7 @@ bool MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(
           break;
 
         for (auto &Succ : Pred.getSUnit()->Succs) {
-          auto MI = Succ.getSUnit()->getInstr();
+          auto *MI = Succ.getSUnit()->getInstr();
           if (!TII->isVMEM(*MI) || !MI->mayLoad())
             continue;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index ff8798edb3cc0f..21fffba14287ef 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1537,7 +1537,7 @@ static bool IsCopyFromSGPR(const SIRegisterInfo &TRI, SDValue Val) {
   auto Reg = cast<RegisterSDNode>(Val.getOperand(1))->getReg();
   if (!Reg.isPhysical())
     return false;
-  auto RC = TRI.getPhysRegBaseClass(Reg);
+  const auto *RC = TRI.getPhysRegBaseClass(Reg);
   return RC && TRI.isSGPRClass(RC);
 }
 
@@ -1855,13 +1855,13 @@ bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
 }
 
 static SDValue SelectSAddrFI(SelectionDAG *CurDAG, SDValue SAddr) {
-  if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
+  if (auto *FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
     SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
   } else if (SAddr.getOpcode() == ISD::ADD &&
              isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
     // Materialize this into a scalar move for scalar address to avoid
     // readfirstlane.
-    auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
+    auto *FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
                                               FI->getValueType(0));
     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, SDLoc(SAddr),
@@ -2391,7 +2391,7 @@ bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
     return true;
 
   if (VT == MVT::i64) {
-    auto ST = static_cast<const GCNSubtarget *>(Subtarget);
+    const auto *ST = static_cast<const GCNSubtarget *>(Subtarget);
 
     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
@@ -3600,7 +3600,7 @@ bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode * N) const {
 }
 
 bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
-  auto Ld = cast<LoadSDNode>(N);
+  const auto *Ld = cast<LoadSDNode>(N);
 
   const MachineMemOperand *MMO = Ld->getMemOperand();
   if (N->isDivergent() && !AMDGPUInstrInfo::isUniformMMO(MMO))
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 94fdf4effa10a1..cceb89e23f1290 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4202,7 +4202,7 @@ SDValue AMDGPUTargetLowering::performTruncateCombine(
   // integer operation.
   // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
   if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
-    if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
+    if (aut...
[truncated]

jayfoad merged commit 8d13e7b into llvm:main on Oct 3, 2024
8 checks passed
jayfoad deleted the qualify-auto branch on October 3, 2024 at 12:07
xgupta pushed a commit to xgupta/llvm-project that referenced this pull request Oct 4, 2024
Generated automatically with:
$ clang-tidy -fix -checks=-*,llvm-qualified-auto $(find lib/Target/AMDGPU/ -type f)