[llvm] 85b4b21 - [llvm] Use make_early_inc_range (NFC)
Kazu Hirata via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 20 19:30:15 PDT 2021
Author: Kazu Hirata
Date: 2021-09-20T19:30:02-07:00
New Revision: 85b4b21c8bbad346d58a30154d2767c39cf3285a
URL: https://github.com/llvm/llvm-project/commit/85b4b21c8bbad346d58a30154d2767c39cf3285a
DIFF: https://github.com/llvm/llvm-project/commit/85b4b21c8bbad346d58a30154d2767c39cf3285a.diff
LOG: [llvm] Use make_early_inc_range (NFC)
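For context, llvm::make_early_inc_range (defined in llvm/ADT/STLExtras.h) wraps a range in an early-incrementing iterator: the wrapped iterator is advanced before each element is handed to the loop body, so the body may erase the current element without invalidating the iteration. A minimal sketch of the idiom, not taken from this commit (dropDeadDeclarations is a hypothetical helper):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Module.h"

  // Erase unused declarations from a module. A plain range-for over M
  // would be invalidated by eraseFromParent(); the early-inc adaptor has
  // already stepped past F when the loop body runs, so the erase is safe.
  static void dropDeadDeclarations(llvm::Module &M) {
    for (llvm::Function &F : llvm::make_early_inc_range(M))
      if (F.isDeclaration() && F.use_empty())
        F.eraseFromParent();
  }

Each hunk below performs the same conversion: the hand-written "copy the element, then post-increment the iterator" pattern is replaced by the adaptor.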
Added:
Modified:
llvm/lib/AsmParser/LLParser.cpp
llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
llvm/lib/Target/X86/X86OptimizeLEAs.cpp
llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
Removed:
################################################################################
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 5a1b3c1e28fdf..9a026e77accff 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -235,18 +235,18 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) {
Inst->setMetadata(LLVMContext::MD_tbaa, UpgradedMD);
}
- // Look for intrinsic functions and CallInst that need to be upgraded
- for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; )
- UpgradeCallsToIntrinsic(&*FI++); // must be post-increment, as we remove
+ // Look for intrinsic functions and CallInst that need to be upgraded. We use
+ // make_early_inc_range here because we may remove some functions.
+ for (Function &F : llvm::make_early_inc_range(*M))
+ UpgradeCallsToIntrinsic(&F);
// Some types could be renamed during loading if several modules are
// loaded in the same LLVMContext (LTO scenario). In this case we should
// remangle intrinsics names as well.
- for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ) {
- Function *F = &*FI++;
- if (auto Remangled = Intrinsic::remangleIntrinsicFunction(F)) {
- F->replaceAllUsesWith(Remangled.getValue());
- F->eraseFromParent();
+ for (Function &F : llvm::make_early_inc_range(*M)) {
+ if (auto Remangled = Intrinsic::remangleIntrinsicFunction(&F)) {
+ F.replaceAllUsesWith(Remangled.getValue());
+ F.eraseFromParent();
}
}
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 8443e939a11c0..2b83a292db768 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -410,11 +410,9 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
return;
bool isThumb2 = Subtarget->isThumb();
- for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
- E = CurDAG->allnodes_end(); I != E; ) {
- SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.
-
- if (N->getOpcode() != ISD::ADD)
+ // We use make_early_inc_range to avoid invalidation issues.
+ for (SDNode &N : llvm::make_early_inc_range(CurDAG->allnodes())) {
+ if (N.getOpcode() != ISD::ADD)
continue;
// Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
@@ -426,8 +424,8 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
// operand of 'add' and the 'and' and 'srl' would become a bits extraction
// node (UBFX).
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
+ SDValue N0 = N.getOperand(0);
+ SDValue N1 = N.getOperand(1);
unsigned And_imm = 0;
if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
@@ -484,7 +482,7 @@ void ARMDAGToDAGISel::PreprocessISelDAG() {
CurDAG->getConstant(And_imm, SDLoc(Srl), MVT::i32));
N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
N1, CurDAG->getConstant(TZ, SDLoc(Srl), MVT::i32));
- CurDAG->UpdateNodeOperands(N, N0, N1);
+ CurDAG->UpdateNodeOperands(&N, N0, N1);
}
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp b/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
index 9078ff8cfb975..063ea18574b94 100644
--- a/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXGenericToNVVM.cpp
@@ -74,19 +74,16 @@ bool GenericToNVVM::runOnModule(Module &M) {
// of original global variable and its clone is placed in the GVMap for later
// use.
- for (Module::global_iterator I = M.global_begin(), E = M.global_end();
- I != E;) {
- GlobalVariable *GV = &*I++;
- if (GV->getType()->getAddressSpace() == llvm::ADDRESS_SPACE_GENERIC &&
- !llvm::isTexture(*GV) && !llvm::isSurface(*GV) &&
- !llvm::isSampler(*GV) && !GV->getName().startswith("llvm.")) {
+ for (GlobalVariable &GV : llvm::make_early_inc_range(M.globals())) {
+ if (GV.getType()->getAddressSpace() == llvm::ADDRESS_SPACE_GENERIC &&
+ !llvm::isTexture(GV) && !llvm::isSurface(GV) && !llvm::isSampler(GV) &&
+ !GV.getName().startswith("llvm.")) {
GlobalVariable *NewGV = new GlobalVariable(
- M, GV->getValueType(), GV->isConstant(),
- GV->getLinkage(),
- GV->hasInitializer() ? GV->getInitializer() : nullptr,
- "", GV, GV->getThreadLocalMode(), llvm::ADDRESS_SPACE_GLOBAL);
- NewGV->copyAttributesFrom(GV);
- GVMap[GV] = NewGV;
+ M, GV.getValueType(), GV.isConstant(), GV.getLinkage(),
+ GV.hasInitializer() ? GV.getInitializer() : nullptr, "", &GV,
+ GV.getThreadLocalMode(), llvm::ADDRESS_SPACE_GLOBAL);
+ NewGV->copyAttributesFrom(&GV);
+ GVMap[&GV] = NewGV;
}
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
index 59d69e48b775c..7507344037871 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
@@ -842,8 +842,7 @@ static void unstackifyVRegsUsedInSplitBB(MachineBasicBlock &MBB,
// INST ..., TeeReg, ...
// INST ..., Reg, ...
// INST ..., Reg, ...
- for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
- MachineInstr &MI = *I++;
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
if (!WebAssembly::isTee(MI.getOpcode()))
continue;
Register TeeReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index 4a0738dc3b7ab..a933d1a4f4215 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -252,8 +252,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
// Visit each instruction in the function.
for (MachineBasicBlock &MBB : MF) {
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
- MachineInstr &MI = *I++;
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
assert(!WebAssembly::isArgument(MI.getOpcode()));
if (MI.isDebugInstr() || MI.isLabel())
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
index 01b3aa887738e..52226206eb325 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
@@ -63,12 +63,11 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
auto &MRI = MF.getRegInfo();
for (auto &MBB : MF) {
- for (auto MII = MBB.begin(); MII != MBB.end();) {
- MachineInstr *MI = &*MII++;
- if (MI->getOpcode() != WebAssembly::BR_UNLESS)
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
+ if (MI.getOpcode() != WebAssembly::BR_UNLESS)
continue;
- Register Cond = MI->getOperand(1).getReg();
+ Register Cond = MI.getOperand(1).getReg();
bool Inverted = false;
// Attempt to invert the condition in place.
@@ -189,7 +188,7 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
// instruction to invert it.
if (!Inverted) {
Register Tmp = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
- BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::EQZ_I32), Tmp)
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII.get(WebAssembly::EQZ_I32), Tmp)
.addReg(Cond);
MFI.stackifyVReg(MRI, Tmp);
Cond = Tmp;
@@ -199,10 +198,10 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
// The br_unless condition has now been inverted. Insert a br_if and
// delete the br_unless.
assert(Inverted);
- BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::BR_IF))
- .add(MI->getOperand(0))
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII.get(WebAssembly::BR_IF))
+ .add(MI.getOperand(0))
.addReg(Cond);
- MBB.erase(MI);
+ MBB.erase(&MI);
}
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
index 9aea65cba280c..2180f57c106a7 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyMemIntrinsicResults.cpp
@@ -96,9 +96,8 @@ static bool replaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
SmallVector<SlotIndex, 4> Indices;
- for (auto I = MRI.use_nodbg_begin(FromReg), E = MRI.use_nodbg_end();
- I != E;) {
- MachineOperand &O = *I++;
+ for (MachineOperand &O :
+ llvm::make_early_inc_range(MRI.use_nodbg_operands(FromReg))) {
MachineInstr *Where = O.getParent();
// Check that MI dominates the instruction in the normal way.
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
index 6bfed1a7195c1..9d83a75a82478 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -106,13 +106,12 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(
// instructions to satisfy LiveIntervals' requirement that all uses be
// dominated by defs. Now that LiveIntervals has computed which of these
// defs are actually needed and which are dead, remove the dead ones.
- for (auto MII = MF.begin()->begin(), MIE = MF.begin()->end(); MII != MIE;) {
- MachineInstr *MI = &*MII++;
- if (MI->isImplicitDef() && MI->getOperand(0).isDead()) {
- LiveInterval &LI = LIS.getInterval(MI->getOperand(0).getReg());
- LIS.removeVRegDefAt(LI, LIS.getInstructionIndex(*MI).getRegSlot());
- LIS.RemoveMachineInstrFromMaps(*MI);
- MI->eraseFromParent();
+ for (MachineInstr &MI : llvm::make_early_inc_range(MF.front())) {
+ if (MI.isImplicitDef() && MI.getOperand(0).isDead()) {
+ LiveInterval &LI = LIS.getInterval(MI.getOperand(0).getReg());
+ LIS.removeVRegDefAt(LI, LIS.getInstructionIndex(MI).getRegSlot());
+ LIS.RemoveMachineInstrFromMaps(MI);
+ MI.eraseFromParent();
}
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
index ed5f7ccc854f2..8b8593ddcbdd4 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
@@ -112,8 +112,7 @@ bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(
// Move ARGUMENT_* instructions to the top of the entry block, so that their
// liveness reflects the fact that these really are live-in values.
- for (auto MII = Entry.begin(), MIE = Entry.end(); MII != MIE;) {
- MachineInstr &MI = *MII++;
+ for (MachineInstr &MI : llvm::make_early_inc_range(Entry)) {
if (WebAssembly::isArgument(MI.getOpcode())) {
MI.removeFromParent();
Entry.insert(Entry.begin(), &MI);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
index 9f5d6b2a9a47b..dc854ba573c36 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
@@ -85,8 +85,8 @@ bool WebAssemblyReplacePhysRegs::runOnMachineFunction(MachineFunction &MF) {
// Replace explicit uses of the physical register with a virtual register.
const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PReg);
unsigned VReg = WebAssembly::NoRegister;
- for (auto I = MRI.reg_begin(PReg), E = MRI.reg_end(); I != E;) {
- MachineOperand &MO = *I++;
+ for (MachineOperand &MO :
+ llvm::make_early_inc_range(MRI.reg_operands(PReg))) {
if (!MO.isImplicit()) {
if (VReg == WebAssembly::NoRegister) {
VReg = MRI.createVirtualRegister(RC);
diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index b6a37f08d7e9b..4a3206f370955 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -542,9 +542,8 @@ void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
int DefVR = MI.getOperand(0).getReg();
if (!MRI->hasOneNonDBGUse(DefVR))
continue;
- for (auto UI = MRI->use_nodbg_begin(DefVR), UE = MRI->use_nodbg_end();
- UI != UE;) {
- MachineOperand &StoreMO = *UI++;
+ for (MachineOperand &StoreMO :
+ llvm::make_early_inc_range(MRI->use_nodbg_operands(DefVR))) {
MachineInstr &StoreMI = *StoreMO.getParent();
// Skip cases where the memcpy may overlap.
if (StoreMI.getParent() == MI.getParent() &&
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index ab4d2bd057727..659fb632cc50d 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -503,9 +503,7 @@ bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();
// Process all instructions in basic block.
- for (auto I = MBB->begin(), E = MBB->end(); I != E;) {
- MachineInstr &MI = *I++;
-
+ for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
// Instruction must be load or store.
if (!MI.mayLoadOrStore())
continue;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index fcaf7c86128a2..83a4a025f518c 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -850,11 +850,9 @@ getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII,
void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF)
- for (auto MII = MBB.instr_begin(), MIE = MBB.instr_end(); MII != MIE;) {
- // Grab a reference and increment the iterator so we can remove this
- // instruction if needed without disturbing the iteration.
- MachineInstr &MI = *MII++;
-
+ // We use make_early_inc_range here so we can remove instructions if needed
+ // without disturbing the iteration.
+ for (MachineInstr &MI : llvm::make_early_inc_range(MBB.instrs())) {
// Must either be a call or a branch.
if (!MI.isCall() && !MI.isBranch())
continue;
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index e0d7dea1e2236..433a852572438 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1609,13 +1609,11 @@ bool HWAddressSanitizer::sanitizeFunction(
// dynamic allocas.
if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
InsertPt = &*F.getEntryBlock().begin();
- for (auto II = EntryIRB.GetInsertBlock()->begin(),
- IE = EntryIRB.GetInsertBlock()->end();
- II != IE;) {
- Instruction *I = &*II++;
- if (auto *AI = dyn_cast<AllocaInst>(I))
+ for (Instruction &I :
+ llvm::make_early_inc_range(*EntryIRB.GetInsertBlock())) {
+ if (auto *AI = dyn_cast<AllocaInst>(&I))
if (isa<ConstantInt>(AI->getArraySize()))
- I->moveBefore(InsertPt);
+ I.moveBefore(InsertPt);
}
}
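For readers unfamiliar with the adaptor, the following is a simplified, self-contained model of how it sidesteps invalidation. This is an illustrative sketch only, not the real implementation (which is early_inc_iterator_impl in llvm/ADT/STLExtras.h):

  #include <algorithm>
  #include <iostream>
  #include <iterator>
  #include <list>

  // Early-incrementing iterator: operator* advances the wrapped iterator
  // *before* yielding the element, and operator++ is a no-op, so erasing
  // the yielded element never touches the live iterator.
  template <typename It> class EarlyIncIterator {
    It Cur;
  public:
    explicit EarlyIncIterator(It I) : Cur(I) {}
    decltype(auto) operator*() { return *Cur++; }    // advance, then yield
    EarlyIncIterator &operator++() { return *this; } // already advanced
    bool operator!=(const EarlyIncIterator &RHS) const {
      return Cur != RHS.Cur;
    }
  };

  template <typename It> struct SimpleRange {
    It B, E;
    It begin() const { return B; }
    It end() const { return E; }
  };

  template <typename RangeT> auto makeEarlyIncRange(RangeT &&R) {
    using Iter = EarlyIncIterator<decltype(std::begin(R))>;
    return SimpleRange<Iter>{Iter(std::begin(R)), Iter(std::end(R))};
  }

  int main() {
    std::list<int> L{1, 2, 3, 4, 5};
    for (int &X : makeEarlyIncRange(L)) {
      if (X % 2 == 0) {
        // Erase the element X refers to. Safe: the iterator inside the
        // range has already moved past it, and std::list::erase only
        // invalidates iterators to the erased element.
        L.erase(std::find_if(L.begin(), L.end(),
                             [&](const int &V) { return &V == &X; }));
      }
    }
    for (int X : L)
      std::cout << X << ' '; // prints: 1 3 5
  }

Note that an early-inc range is strictly single-pass; the real LLVM adaptor additionally asserts (in debug builds) that each element is dereferenced at most once.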