[llvm] [AMDGPU] Use range-based for loops. NFC. (PR #99047)
Jay Foad via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 16 06:52:18 PDT 2024
https://github.com/jayfoad created https://github.com/llvm/llvm-project/pull/99047
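No functional change intended: index-based loops over containers are rewritten as range-based for loops across the AMDGPU backend, in a few places also adopting structured bindings over pair elements or updating elements through the loop reference. A minimal standalone sketch of the patterns applied, with hypothetical container and variable names (not taken from the tree):

  #include <cstdio>
  #include <utility>
  #include <vector>

  int main() {
    std::vector<std::pair<int, int>> Consts{{1, 2}, {3, 4}};

    // Before: index-based iteration with explicit .first/.second access.
    for (unsigned I = 0, E = Consts.size(); I != E; ++I)
      std::printf("%d %d\n", Consts[I].first, Consts[I].second);

    // After: range-based loop with structured bindings; same behavior (NFC).
    for (auto &[Op, Sel] : Consts)
      std::printf("%d %d\n", Op, Sel);

    // Where the body must update elements in place (as in the
    // SIMachineFunctionInfo change below), iterate by non-const reference
    // and assign through it rather than writing back via an index.
    std::vector<int> Regs{10, 20, 30};
    for (int &Reg : Regs)
      Reg += 1;
    return 0;
  }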
From 4762d60f4a26604ef877cb6f2d981cbba2c4148d Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Tue, 16 Jul 2024 13:24:30 +0100
Subject: [PATCH] [AMDGPU] Use range-based for loops. NFC.
---
llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp | 8 ++------
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 4 ++--
llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp | 10 ++++------
.../Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp | 13 ++++++-------
.../Target/AMDGPU/R600EmitClauseMarkers.cpp | 18 ++++++++----------
.../AMDGPU/R600MachineCFGStructurizer.cpp | 4 ++--
.../AMDGPU/R600OpenCLImageTypeLoweringPass.cpp | 5 ++---
llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 14 +++++---------
llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 8 +++-----
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 7 +++----
.../Target/AMDGPU/SIMachineFunctionInfo.cpp | 6 +++---
11 files changed, 40 insertions(+), 57 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index 86f28a5057694..2884a8a966f33 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -190,13 +190,9 @@ class SchedGroup {
// Returns true if the SU matches all rules
bool allowedByRules(const SUnit *SU,
SmallVectorImpl<SchedGroup> &SyncPipe) const {
- if (Rules.empty())
- return true;
- for (size_t I = 0; I < Rules.size(); I++) {
- auto TheRule = Rules[I].get();
- if (!TheRule->apply(SU, Collection, SyncPipe)) {
+ for (auto &Rule : Rules) {
+ if (!Rule->apply(SU, Collection, SyncPipe))
return false;
- }
}
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 26426575aeed3..01f399e302879 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1342,8 +1342,8 @@ SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
DAG.getContext()->diagnose(NoCalls);
if (!CLI.IsTailCall) {
- for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
- InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
+ for (auto &Arg : CLI.Ins)
+ InVals.push_back(DAG.getUNDEF(Arg.VT));
}
return DAG.getEntryNode();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 456f3cb332cf8..1012498fa4ee4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -861,9 +861,8 @@ bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {
Constant *nval;
if (getArgType(FInfo) == AMDGPULibFunc::F32) {
SmallVector<float, 0> FVal;
- for (unsigned i = 0; i < DVal.size(); ++i) {
- FVal.push_back((float)DVal[i]);
- }
+ for (double D : DVal)
+ FVal.push_back((float)D);
ArrayRef<float> tmp(FVal);
nval = ConstantDataVector::get(context, tmp);
} else { // F64
@@ -1082,9 +1081,8 @@ bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
}
if (getArgType(FInfo) == AMDGPULibFunc::F32) {
SmallVector<float, 0> FVal;
- for (unsigned i=0; i < DVal.size(); ++i) {
- FVal.push_back((float)DVal[i]);
- }
+ for (double D : DVal)
+ FVal.push_back((float)D);
ArrayRef<float> tmp(FVal);
cnval = ConstantDataVector::get(M->getContext(), tmp);
} else {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index a295117de6414..bb2603e0076e4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -321,8 +321,7 @@ class AMDGPULowerModuleLDS {
ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());
SmallVector<Constant *> Elements;
- for (size_t i = 0; i < Variables.size(); i++) {
- GlobalVariable *GV = Variables[i];
+ for (GlobalVariable *GV : Variables) {
auto ConstantGepIt = LDSVarsToConstantGEP.find(GV);
if (ConstantGepIt != LDSVarsToConstantGEP.end()) {
auto elt = ConstantExpr::getPtrToInt(ConstantGepIt->second, I32);
@@ -1194,10 +1193,10 @@ class AMDGPULowerModuleLDS {
IsPaddingField.reserve(LDSVarsToTransform.size());
{
uint64_t CurrentOffset = 0;
- for (size_t I = 0; I < LayoutFields.size(); I++) {
- GlobalVariable *FGV = static_cast<GlobalVariable *>(
- const_cast<void *>(LayoutFields[I].Id));
- Align DataAlign = LayoutFields[I].Alignment;
+ for (auto &F : LayoutFields) {
+ GlobalVariable *FGV =
+ static_cast<GlobalVariable *>(const_cast<void *>(F.Id));
+ Align DataAlign = F.Alignment;
uint64_t DataAlignV = DataAlign.value();
if (uint64_t Rem = CurrentOffset % DataAlignV) {
@@ -1218,7 +1217,7 @@ class AMDGPULowerModuleLDS {
LocalVars.push_back(FGV);
IsPaddingField.push_back(false);
- CurrentOffset += LayoutFields[I].Size;
+ CurrentOffset += F.Size;
}
}
diff --git a/llvm/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp b/llvm/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
index ef67e5c937dc2..4f4c7868a5c16 100644
--- a/llvm/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
+++ b/llvm/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
@@ -124,10 +124,9 @@ class R600EmitClauseMarkers : public MachineFunctionPass {
assert(
(TII->isALUInstr(MI.getOpcode()) || MI.getOpcode() == R600::DOT_4) &&
"Can't assign Const");
- for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
- if (Consts[i].first->getReg() != R600::ALU_CONST)
+ for (auto &[Op, Sel] : Consts) {
+ if (Op->getReg() != R600::ALU_CONST)
continue;
- unsigned Sel = Consts[i].second;
unsigned Chan = Sel & 3, Index = ((Sel >> 2) - 512) & 31;
unsigned KCacheIndex = Index * 4 + Chan;
const std::pair<unsigned, unsigned> &BankLine = getAccessedBankLine(Sel);
@@ -155,17 +154,16 @@ class R600EmitClauseMarkers : public MachineFunctionPass {
if (!UpdateInstr)
return true;
- for (unsigned i = 0, j = 0, n = Consts.size(); i < n; ++i) {
- if (Consts[i].first->getReg() != R600::ALU_CONST)
+ unsigned j = 0;
+ for (auto &[Op, Sel] : Consts) {
+ if (Op->getReg() != R600::ALU_CONST)
continue;
- switch(UsedKCache[j].first) {
+ switch (UsedKCache[j].first) {
case 0:
- Consts[i].first->setReg(
- R600::R600_KC0RegClass.getRegister(UsedKCache[j].second));
+ Op->setReg(R600::R600_KC0RegClass.getRegister(UsedKCache[j].second));
break;
case 1:
- Consts[i].first->setReg(
- R600::R600_KC1RegClass.getRegister(UsedKCache[j].second));
+ Op->setReg(R600::R600_KC1RegClass.getRegister(UsedKCache[j].second));
break;
default:
llvm_unreachable("Wrong Cache Line");
diff --git a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
index c8152c1f920df..29e3365be9920 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineCFGStructurizer.cpp
@@ -669,8 +669,8 @@ void R600MachineCFGStructurizer::wrapup(MachineBasicBlock *MBB) {
}
//delete continue right before endloop
- for (unsigned i = 0; i < ContInstr.size(); ++i)
- ContInstr[i]->eraseFromParent();
+ for (auto *MI : ContInstr)
+ MI->eraseFromParent();
// TODO to fix up jump table so later phase won't be confused. if
// (jumpTableInfo->isEmpty() == false) { need to clean the jump table, but
diff --git a/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp b/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
index c1a5e3b593748..604a4cb1bf881 100644
--- a/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OpenCLImageTypeLoweringPass.cpp
@@ -244,9 +244,8 @@ class R600OpenCLImageTypeLoweringPass : public ModulePass {
Modified |= replaceSamplerUses(Arg, ResourceID);
}
}
- for (unsigned i = 0; i < InstsToErase.size(); ++i) {
- InstsToErase[i]->eraseFromParent();
- }
+ for (auto *Inst : InstsToErase)
+ Inst->eraseFromParent();
return Modified;
}
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 68c5f23c8e11f..bfcf70bccb511 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -613,10 +613,8 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
TII = ST.getInstrInfo();
MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
- for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
- BI != BE; ++BI) {
- MachineBasicBlock *MBB = &*BI;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
++I) {
MachineInstr &MI = *I;
@@ -665,7 +663,7 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
Register NewDst = MRI->createVirtualRegister(DestRC);
MachineBasicBlock *BlockToInsertCopy =
MI.isPHI() ? MI.getOperand(MO.getOperandNo() + 1).getMBB()
- : MBB;
+ : &MBB;
MachineBasicBlock::iterator PointToInsertCopy =
MI.isPHI() ? BlockToInsertCopy->getFirstInstrTerminator() : I;
@@ -1095,10 +1093,8 @@ void SIFixSGPRCopies::lowerVGPR2SGPRCopies(MachineFunction &MF) {
void SIFixSGPRCopies::fixSCCCopies(MachineFunction &MF) {
bool IsWave32 = MF.getSubtarget<GCNSubtarget>().isWave32();
- for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
- ++BI) {
- MachineBasicBlock *MBB = &*BI;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
++I) {
MachineInstr &MI = *I;
// May already have been lowered.
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 7bf6a635158eb..0e8c96625b221 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1770,8 +1770,7 @@ bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER))
return false;
- for (auto &Def : Defs) {
- const auto *Op = Def.first;
+ for (auto &[Op, SubIdx] : Defs) {
if (!Op->isReg())
return false;
if (TRI->isAGPR(*MRI, Op->getReg()))
@@ -1809,8 +1808,7 @@ bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
TII->get(AMDGPU::REG_SEQUENCE), Dst);
- for (unsigned I = 0; I < Defs.size(); ++I) {
- MachineOperand *Def = Defs[I].first;
+ for (auto &[Def, SubIdx] : Defs) {
Def->setIsKill(false);
if (TRI->isAGPR(*MRI, Def->getReg())) {
RS.add(*Def);
@@ -1819,7 +1817,7 @@ bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
SubDef->getOperand(1).setIsKill(false);
RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
}
- RS.addImm(Defs[I].second);
+ RS.addImm(SubIdx);
}
Op->setReg(Dst);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index bb8e21772e566..37f11ead8ab1d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3250,8 +3250,7 @@ SDValue SITargetLowering::LowerCallResult(
CCInfo.AnalyzeCallResult(Ins, RetCC);
// Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign VA = RVLocs[i];
+ for (CCValAssign VA : RVLocs) {
SDValue Val;
if (VA.isRegLoc()) {
@@ -3642,8 +3641,8 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
if (Callee.isUndef() || isNullConstant(Callee)) {
if (!CLI.IsTailCall) {
- for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
- InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
+ for (auto &Arg : CLI.Ins)
+ InVals.push_back(DAG.getUNDEF(Arg.VT));
}
return Chain;
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index d9db0f7a4f531..1650d53161e70 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -324,8 +324,7 @@ void SIMachineFunctionInfo::shiftSpillPhysVGPRsToLowestRange(
MachineFunction &MF) {
const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
- for (unsigned I = 0, E = SpillPhysVGPRs.size(); I < E; ++I) {
- Register Reg = SpillPhysVGPRs[I];
+ for (Register &Reg : SpillPhysVGPRs) {
Register NewReg =
TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
if (!NewReg || NewReg >= Reg)
@@ -334,7 +333,6 @@ void SIMachineFunctionInfo::shiftSpillPhysVGPRsToLowestRange(
MRI.replaceRegWith(Reg, NewReg);
// Update various tables with the new VGPR.
- SpillPhysVGPRs[I] = NewReg;
WWMReservedRegs.remove(Reg);
WWMReservedRegs.insert(NewReg);
WWMSpills.insert(std::make_pair(NewReg, WWMSpills[Reg]));
@@ -344,6 +342,8 @@ void SIMachineFunctionInfo::shiftSpillPhysVGPRsToLowestRange(
MBB.removeLiveIn(Reg);
MBB.sortUniqueLiveIns();
}
+
+ Reg = NewReg;
}
}