[clang] [libc] [llvm] [DebugInfo][RemoveDIs] Suppress getNextNonDebugInstruction (PR #144383)
Jeremy Morse via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 16 09:15:39 PDT 2025
https://github.com/jmorse created https://github.com/llvm/llvm-project/pull/144383
There are no longer any debug-info intrinsic instructions, so we don't need this skipping. Hooray!
CC @WenleiHe , `getNextNonDebugInstruction` has a "skip pseudo-probes" argument that appears to be unused throughout all of upstream LLVM. We'd really like to get rid of everything to do with debug intrinsics to reduce the maintenance burden everywhere -- please let us know if this is going to interfere with anything you've got going on downstream.
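For downstream code the migration is mechanical: wherever a walk previously used `getNextNonDebugInstruction()` to hop over `llvm.dbg.*` intrinsic calls, the plain list successor now suffices, since debug info is attached to instructions as records rather than sitting in the block as instructions. A minimal sketch of the pattern (the `countFollowing` helper is hypothetical, for illustration only, not part of this patch):

```cpp
#include "llvm/IR/Instruction.h"

// Hypothetical helper: count the instructions that follow `I` in its block.
static unsigned countFollowing(const llvm::Instruction *I) {
  unsigned Count = 0;
  // Before this patch: stepping with I->getNextNonDebugInstruction() was
  // needed because dbg.value/dbg.declare calls could sit between "real"
  // instructions. After it: every list successor is already a real
  // instruction, so a plain getNextNode() walk visits the same set.
  for (const llvm::Instruction *Cur = I->getNextNode(); Cur;
       Cur = Cur->getNextNode())
    ++Count;
  return Count;
}
```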
From 63654fb7cf702a1d0c5fb5f8d526e926ec5a3470 Mon Sep 17 00:00:00 2001
From: Jeremy Morse <jeremy.morse at sony.com>
Date: Fri, 13 Jun 2025 15:39:53 +0100
Subject: [PATCH] [DebugInfo][RemoveDIs] Suppress
getNextNonDebugInstruction
There are no longer any debug-info intrinsic instructions, thus we don't
need this skipping.
---
clang/lib/CodeGen/CGBlocks.cpp | 4 ++--
libc/docs/configure.rst | 2 +-
llvm/include/llvm/IR/Instruction.h | 11 -----------
.../Transforms/Utils/LockstepReverseIterator.h | 2 +-
llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 2 +-
llvm/lib/CodeGen/CodeGenPrepare.cpp | 2 +-
llvm/lib/IR/Instruction.cpp | 11 -----------
llvm/lib/IR/Verifier.cpp | 2 +-
.../AArch64/AArch64TargetTransformInfo.cpp | 4 ++--
llvm/lib/Target/X86/X86WinEHState.cpp | 4 ++--
llvm/lib/Transforms/Coroutines/CoroSplit.cpp | 4 ++--
llvm/lib/Transforms/IPO/Attributor.cpp | 2 +-
.../Transforms/IPO/AttributorAttributes.cpp | 6 +++---
llvm/lib/Transforms/IPO/GlobalOpt.cpp | 4 ++--
llvm/lib/Transforms/IPO/IROutliner.cpp | 6 +++---
llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 2 +-
.../InstCombine/InstCombineCalls.cpp | 12 ++++++------
.../InstCombine/InstructionCombining.cpp | 2 +-
.../Instrumentation/AddressSanitizer.cpp | 4 ++--
.../Instrumentation/HWAddressSanitizer.cpp | 4 ++--
.../Instrumentation/ValueProfilePlugins.inc | 4 ++--
.../Transforms/Scalar/LoopStrengthReduce.cpp | 2 +-
llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp | 4 ++--
llvm/lib/Transforms/Utils/Local.cpp | 10 +++++-----
.../Utils/ScalarEvolutionExpander.cpp | 2 +-
.../lib/Transforms/Vectorize/SLPVectorizer.cpp | 8 ++++----
.../unittests/Frontend/OpenMPIRBuilderTest.cpp | 18 +++++++++---------
.../FuzzMutate/RandomIRBuilderTest.cpp | 2 +-
llvm/unittests/IR/InstructionsTest.cpp | 4 ++--
llvm/unittests/Transforms/Utils/LocalTest.cpp | 14 +++++++-------
30 files changed, 68 insertions(+), 90 deletions(-)
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 729758ddce560..b5466bb6e346f 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1532,8 +1532,8 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
llvm::BasicBlock *resume = Builder.GetInsertBlock();
// Go back to the entry.
- if (entry_ptr->getNextNonDebugInstruction())
- entry_ptr = entry_ptr->getNextNonDebugInstruction()->getIterator();
+ if (entry_ptr->getNextNode())
+ entry_ptr = entry_ptr->getNextNode()->getIterator();
else
entry_ptr = entry->end();
Builder.SetInsertPoint(entry, entry_ptr);
diff --git a/libc/docs/configure.rst b/libc/docs/configure.rst
index 8d53390ae19bf..109412225634f 100644
--- a/libc/docs/configure.rst
+++ b/libc/docs/configure.rst
@@ -29,7 +29,7 @@ to learn about the defaults for your platform and target.
- ``LIBC_CONF_ENABLE_STRONG_STACK_PROTECTOR``: Enable -fstack-protector-strong to defend against stack smashing attack.
- ``LIBC_CONF_KEEP_FRAME_POINTER``: Keep frame pointer in functions for better debugging experience.
* **"errno" options**
- - ``LIBC_CONF_ERRNO_MODE``: The implementation used for errno, acceptable values are LIBC_ERRNO_MODE_DEFAULT, LIBC_ERRNO_MODE_UNDEFINED, LIBC_ERRNO_MODE_THREAD_LOCAL, LIBC_ERRNO_MODE_SHARED, LIBC_ERRNO_MODE_EXTERNAL, and LIBC_ERRNO_MODE_SYSTEM.
+ - ``LIBC_CONF_ERRNO_MODE``: The implementation used for errno, acceptable values are LIBC_ERRNO_MODE_DEFAULT, LIBC_ERRNO_MODE_UNDEFINED, LIBC_ERRNO_MODE_THREAD_LOCAL, LIBC_ERRNO_MODE_SHARED, LIBC_ERRNO_MODE_EXTERNAL, LIBC_ERRNO_MODE_SYSTEM, and LIBC_ERRNO_MODE_SYSTEM_INLINE.
* **"general" options**
- ``LIBC_ADD_NULL_CHECKS``: Add nullptr checks in the library's implementations to some functions for which passing nullptr is undefined behavior.
* **"math" options**
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index 10fc9c1298607..9904e7bc4b299 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -898,17 +898,6 @@ class Instruction : public User,
/// Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.
LLVM_ABI bool isDebugOrPseudoInst() const LLVM_READONLY;
- /// Return a pointer to the next non-debug instruction in the same basic
- /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
- /// operations if \c SkipPseudoOp is true.
- LLVM_ABI const Instruction *
- getNextNonDebugInstruction(bool SkipPseudoOp = false) const;
- Instruction *getNextNonDebugInstruction(bool SkipPseudoOp = false) {
- return const_cast<Instruction *>(
- static_cast<const Instruction *>(this)->getNextNonDebugInstruction(
- SkipPseudoOp));
- }
-
/// Return a pointer to the previous non-debug instruction in the same basic
/// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
/// operations if \c SkipPseudoOp is true.
diff --git a/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h b/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h
index 1b6309c7fb1a4..cd525a9710103 100644
--- a/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h
+++ b/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h
@@ -133,7 +133,7 @@ class LockstepReverseIterator
return *this;
SmallVector<Instruction *, 4> NewInsts;
for (Instruction *Inst : Insts) {
- Instruction *Next = Inst->getNextNonDebugInstruction();
+ Instruction *Next = Inst->getNextNode();
// Already at end of block.
if (!Next) {
Fail = true;
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index f062189bac6a0..3709081e257be 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -356,7 +356,7 @@ static bool canSkipClobberingStore(const StoreInst *SI,
if (BatchAA.alias(MemoryLocation::get(LI), MemLoc) != AliasResult::MustAlias)
return false;
unsigned NumVisitedInsts = 0;
- for (const Instruction *I = LI; I != SI; I = I->getNextNonDebugInstruction())
+ for (const Instruction *I = LI; I != SI; I = I->getNextNode())
if (++NumVisitedInsts > ScanLimit ||
isModSet(BatchAA.getModRefInfo(I, MemLoc)))
return false;
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 3792b456c836e..af6306c126136 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -7327,7 +7327,7 @@ bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
!TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
return false;
- IRBuilder<> Builder(Load->getNextNonDebugInstruction());
+ IRBuilder<> Builder(Load->getNextNode());
auto *NewAnd = cast<Instruction>(
Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
// Mark this instruction as "inserted by CGP", so that other
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 1b60caab6c11a..8c71d077ed67b 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -1235,14 +1235,6 @@ bool Instruction::isDebugOrPseudoInst() const {
return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}
-const Instruction *
-Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
- for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
- if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
- return I;
- return nullptr;
-}
-
const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
@@ -1252,9 +1244,6 @@ Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
}
const DebugLoc &Instruction::getStableDebugLoc() const {
- if (isa<DbgInfoIntrinsic>(this))
- if (const Instruction *Next = getNextNonDebugInstruction())
- return Next->getDebugLoc();
return getDebugLoc();
}
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 1f1041b259736..a3a3e3f4e9551 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6455,7 +6455,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
Check(!Call.paramHasAttr(3, Attribute::InReg),
"VGPR arguments must not have the `inreg` attribute", &Call);
- auto *Next = Call.getNextNonDebugInstruction();
+ auto *Next = Call.getNextNode();
bool IsAMDUnreachable = Next && isa<IntrinsicInst>(Next) &&
cast<IntrinsicInst>(Next)->getIntrinsicID() ==
Intrinsic::amdgcn_unreachable;
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 0232ac421aeda..396c8a725b5bd 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2674,14 +2674,14 @@ static std::optional<Instruction *> instCombineSVEInsr(InstCombiner &IC,
static std::optional<Instruction *> instCombineDMB(InstCombiner &IC,
IntrinsicInst &II) {
// If this barrier is post-dominated by identical one we can remove it
- auto *NI = II.getNextNonDebugInstruction();
+ auto *NI = II.getNextNode();
unsigned LookaheadThreshold = DMBLookaheadThreshold;
auto CanSkipOver = [](Instruction *I) {
return !I->mayReadOrWriteMemory() && !I->mayHaveSideEffects();
};
while (LookaheadThreshold-- && CanSkipOver(NI)) {
auto *NIBB = NI->getParent();
- NI = NI->getNextNonDebugInstruction();
+ NI = NI->getNextNode();
if (!NI) {
if (auto *SuccBB = NIBB->getUniqueSuccessor())
NI = &*SuccBB->getFirstNonPHIOrDbgOrLifetime();
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index 27111fce45662..a650f6f069e5f 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -811,7 +811,7 @@ void WinEHStatePass::updateEspForInAllocas(Function &F) {
if (auto *Alloca = dyn_cast<AllocaInst>(&I)) {
if (Alloca->isStaticAlloca())
continue;
- IRBuilder<> Builder(Alloca->getNextNonDebugInstruction());
+ IRBuilder<> Builder(Alloca->getNextNode());
// SavedESP = llvm.stacksave()
Value *SP = Builder.CreateStackSave();
Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0));
@@ -820,7 +820,7 @@ void WinEHStatePass::updateEspForInAllocas(Function &F) {
if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
if (II->getIntrinsicID() != Intrinsic::stackrestore)
continue;
- IRBuilder<> Builder(II->getNextNonDebugInstruction());
+ IRBuilder<> Builder(II->getNextNode());
// SavedESP = llvm.stacksave()
Value *SP = Builder.CreateStackSave();
Builder.CreateStore(SP, Builder.CreateStructGEP(RegNodeTy, RegNode, 0));
diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
index 8813f91e9060c..1dc49e221b6ff 100644
--- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -807,7 +807,7 @@ static void updateScopeLine(Instruction *ActiveSuspend,
return;
// No subsequent instruction -> fallback to the location of ActiveSuspend.
- if (!ActiveSuspend->getNextNonDebugInstruction()) {
+ if (!ActiveSuspend->getNextNode()) {
if (auto DL = ActiveSuspend->getDebugLoc())
if (SPToUpdate.getFile() == DL->getFile())
SPToUpdate.setScopeLine(DL->getLine());
@@ -815,7 +815,7 @@ static void updateScopeLine(Instruction *ActiveSuspend,
}
BasicBlock::iterator Successor =
- ActiveSuspend->getNextNonDebugInstruction()->getIterator();
+ ActiveSuspend->getNextNode()->getIterator();
// Corosplit splits the BB around ActiveSuspend, so the meaningful
// instructions are not in the same BB.
if (auto *Branch = dyn_cast_or_null<BranchInst>(Successor);
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index 050eed376ed3f..df8c7b680cd5f 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -795,7 +795,7 @@ isPotentiallyReachable(Attributor &A, const Instruction &FromI,
if (isa<InvokeInst>(CB))
return false;
- Instruction *Inst = CB->getNextNonDebugInstruction();
+ Instruction *Inst = CB->getNextNode();
Worklist.push_back(Inst);
return true;
};
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 3799a696f67af..4f5ba74e53f4b 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -1774,7 +1774,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
do {
if (FromI->mayWriteToMemory() && !IsAssumption(*FromI))
return true;
- FromI = FromI->getNextNonDebugInstruction();
+ FromI = FromI->getNextNode();
} while (FromI && FromI != ToI);
return false;
};
@@ -1785,7 +1785,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
return false;
BasicBlock *IntrBB = IntrI.getParent();
if (IntrI.getParent() == BB) {
- if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(), &IntrI))
+ if (IsImpactedInRange(LoadI->getNextNode(), &IntrI))
return false;
} else {
auto PredIt = pred_begin(IntrBB);
@@ -1802,7 +1802,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
continue;
return false;
}
- if (IsImpactedInRange(LoadI->getNextNonDebugInstruction(),
+ if (IsImpactedInRange(LoadI->getNextNode(),
BB->getTerminator()))
return false;
if (IsImpactedInRange(&IntrBB->front(), &IntrI))
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 7db0586386506..f830e53cfc532 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1880,7 +1880,7 @@ static void RemovePreallocated(Function *F) {
Builder.SetInsertPoint(PreallocatedSetup);
auto *StackSave = Builder.CreateStackSave();
- Builder.SetInsertPoint(NewCB->getNextNonDebugInstruction());
+ Builder.SetInsertPoint(NewCB->getNextNode());
Builder.CreateStackRestore(StackSave);
// Replace @llvm.call.preallocated.arg() with alloca.
@@ -1904,7 +1904,7 @@ static void RemovePreallocated(Function *F) {
auto AddressSpace = UseCall->getType()->getPointerAddressSpace();
auto *ArgType =
UseCall->getFnAttr(Attribute::Preallocated).getValueAsType();
- auto *InsertBefore = PreallocatedSetup->getNextNonDebugInstruction();
+ auto *InsertBefore = PreallocatedSetup->getNextNode();
Builder.SetInsertPoint(InsertBefore);
auto *Alloca =
Builder.CreateAlloca(ArgType, AddressSpace, nullptr, "paarg");
diff --git a/llvm/lib/Transforms/IPO/IROutliner.cpp b/llvm/lib/Transforms/IPO/IROutliner.cpp
index cb18b55ae2183..bdb16ae812cf3 100644
--- a/llvm/lib/Transforms/IPO/IROutliner.cpp
+++ b/llvm/lib/Transforms/IPO/IROutliner.cpp
@@ -268,7 +268,7 @@ void OutlinableRegion::splitCandidate() {
// instruction. If they do not match, there could be problems in rewriting
// the program after outlining, so we ignore it.
if (!BackInst->isTerminator() &&
- EndInst != BackInst->getNextNonDebugInstruction())
+ EndInst != BackInst->getNextNode())
return;
Instruction *StartInst = (*Candidate->begin()).Inst;
@@ -2340,7 +2340,7 @@ static bool nextIRInstructionDataMatchesNextInst(IRInstructionData &ID) {
Instruction *NextIDLInst = NextIDIt->Inst;
Instruction *NextModuleInst = nullptr;
if (!ID.Inst->isTerminator())
- NextModuleInst = ID.Inst->getNextNonDebugInstruction();
+ NextModuleInst = ID.Inst->getNextNode();
else if (NextIDLInst != nullptr)
NextModuleInst =
&*NextIDIt->Inst->getParent()->instructionsWithoutDebug().begin();
@@ -2367,7 +2367,7 @@ bool IROutliner::isCompatibleWithAlreadyOutlinedCode(
// if it does not, we fix it in the InstructionDataList.
if (!Region.Candidate->backInstruction()->isTerminator()) {
Instruction *NewEndInst =
- Region.Candidate->backInstruction()->getNextNonDebugInstruction();
+ Region.Candidate->backInstruction()->getNextNode();
assert(NewEndInst && "Next instruction is a nullptr?");
if (Region.Candidate->end()->Inst != NewEndInst) {
IRInstructionDataList *IDL = Region.Candidate->front()->IDL;
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index dd7ae7e66e350..5de2285c2d2e3 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -2856,7 +2856,7 @@ struct AAExecutionDomainFunction : public AAExecutionDomain {
if (!It->getSecond().IsReachingAlignedBarrierOnly)
ForwardIsOk = false;
break;
- } while ((CurI = CurI->getNextNonDebugInstruction()));
+ } while ((CurI = CurI->getNextNode()));
if (!CurI && !BEDMap.lookup(I.getParent()).IsReachingAlignedBarrierOnly)
ForwardIsOk = false;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c169ab25b2106..1886ee781508e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3243,7 +3243,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// Remove an assume if it is followed by an identical assume.
// TODO: Do we need this? Unless there are conflicting assumptions, the
// computeKnownBits(IIOperand) below here eliminates redundant assumes.
- Instruction *Next = II->getNextNonDebugInstruction();
+ Instruction *Next = II->getNextNode();
if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
return RemoveConditionFromAssume(Next);
@@ -3416,12 +3416,12 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// Is this guard followed by another guard? We scan forward over a small
// fixed window of instructions to handle common cases with conditions
// computed between guards.
- Instruction *NextInst = II->getNextNonDebugInstruction();
+ Instruction *NextInst = II->getNextNode();
for (unsigned i = 0; i < GuardWideningWindow; i++) {
// Note: Using context-free form to avoid compile time blow up
if (!isSafeToSpeculativelyExecute(NextInst))
break;
- NextInst = NextInst->getNextNonDebugInstruction();
+ NextInst = NextInst->getNextNode();
}
Value *NextCond = nullptr;
if (match(NextInst,
@@ -3431,10 +3431,10 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// Remove a guard that it is immediately preceded by an identical guard.
// Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
if (CurrCond != NextCond) {
- Instruction *MoveI = II->getNextNonDebugInstruction();
+ Instruction *MoveI = II->getNextNode();
while (MoveI != NextInst) {
auto *Temp = MoveI;
- MoveI = MoveI->getNextNonDebugInstruction();
+ MoveI = MoveI->getNextNode();
Temp->moveBefore(II->getIterator());
}
replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
@@ -3871,7 +3871,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// Fence instruction simplification
Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
- auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
+ auto *NFI = dyn_cast<FenceInst>(FI.getNextNode());
// This check is solely here to handle arbitrary target-dependent syncscopes.
// TODO: Can remove if does not matter in practice.
if (NFI && FI.isIdenticalTo(NFI))
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index e261807bbc035..9c9dddb73318f 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -4760,7 +4760,7 @@ bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {
// Don't move to the position of a debug intrinsic.
if (isa<DbgInfoIntrinsic>(MoveBefore))
- MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
+ MoveBefore = MoveBefore->getNextNode()->getIterator();
// Re-point iterator to come after any debug-info records, if we're
// running in "RemoveDIs" mode
MoveBefore.setHeadBit(false);
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 840a5e3f31dfd..dfbe4f8172066 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -3396,8 +3396,8 @@ void FunctionStackPoisoner::processDynamicAllocas() {
static void findStoresToUninstrumentedArgAllocas(
AddressSanitizer &ASan, Instruction &InsBefore,
SmallVectorImpl<Instruction *> &InitInsts) {
- Instruction *Start = InsBefore.getNextNonDebugInstruction();
- for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
+ Instruction *Start = InsBefore.getNextNode();
+ for (Instruction *It = Start; It; It = It->getNextNode()) {
// Argument initialization looks like:
// 1) store <Argument>, <Alloca> OR
// 2) <CastArgument> = cast <Argument> to ...
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 77db686f8229c..2c34bf2157cdd 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1421,7 +1421,7 @@ void HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord) {
bool HWAddressSanitizer::instrumentLandingPads(
SmallVectorImpl<Instruction *> &LandingPadVec) {
for (auto *LP : LandingPadVec) {
- IRBuilder<> IRB(LP->getNextNonDebugInstruction());
+ IRBuilder<> IRB(LP->getNextNode());
IRB.CreateCall(
HwasanHandleVfork,
{memtag::readRegister(
@@ -1446,7 +1446,7 @@ bool HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
auto N = I++;
auto *AI = KV.first;
memtag::AllocaInfo &Info = KV.second;
- IRBuilder<> IRB(AI->getNextNonDebugInstruction());
+ IRBuilder<> IRB(AI->getNextNode());
// Replace uses of the alloca with tagged address.
Value *Tag = getAllocaTag(IRB, StackTag, N);
diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
index b47ef8523ea11..a3d4e5367b9ab 100644
--- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
+++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
@@ -102,12 +102,12 @@ public:
void run(std::vector<CandidateInfo> &Candidates) {
std::vector<Instruction *> Result = findVTableAddrs(F);
for (Instruction *I : Result) {
- Instruction *InsertPt = I->getNextNonDebugInstruction();
+ Instruction *InsertPt = I->getNextNode();
// When finding an insertion point, keep PHI and EH pad instructions
// before vp intrinsics. This is similar to
// `BasicBlock::getFirstInsertionPt`.
while (InsertPt && (dyn_cast<PHINode>(InsertPt) || InsertPt->isEHPad()))
- InsertPt = InsertPt->getNextNonDebugInstruction();
+ InsertPt = InsertPt->getNextNode();
// Skip instrumentating the value if InsertPt is the last instruction.
// FIXME: Set InsertPt to the end of basic block to instrument the value
// if InsertPt is the last instruction.
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 242e571c072af..163b81fc23083 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2685,7 +2685,7 @@ LSRInstance::OptimizeLoopTermCond() {
// It's possible for the setcc instruction to be anywhere in the loop, and
// possible for it to have multiple users. If it is not immediately before
// the exiting block branch, move it.
- if (Cond->getNextNonDebugInstruction() != TermBr) {
+ if (Cond->getNextNode() != TermBr) {
if (Cond->hasOneUse()) {
Cond->moveBefore(TermBr->getIterator());
} else {
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 1c4ec6aa08b43..80f0549d207ba 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -727,7 +727,7 @@ bool MemCpyOptPass::processStoreOfLoad(StoreInst *SI, LoadInst *LI,
if (performStackMoveOptzn(LI, SI, DestAlloca, SrcAlloca,
DL.getTypeStoreSize(T), BAA)) {
// Avoid invalidating the iterator.
- BBI = SI->getNextNonDebugInstruction()->getIterator();
+ BBI = SI->getNextNode()->getIterator();
eraseInstruction(SI);
eraseInstruction(LI);
++NumMemCpyInstr;
@@ -1843,7 +1843,7 @@ bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
if (performStackMoveOptzn(M, M, DestAlloca, SrcAlloca,
TypeSize::getFixed(Len->getZExtValue()), BAA)) {
// Avoid invalidating the iterator.
- BBI = M->getNextNonDebugInstruction()->getIterator();
+ BBI = M->getNextNode()->getIterator();
eraseInstruction(M);
++NumMemCpyInstr;
return true;
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index a3252a69874d3..11318059d7176 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -2661,12 +2661,12 @@ static bool rewriteDebugUsers(
SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
if (isa<Instruction>(&To)) {
- bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
+ bool DomPointAfterFrom = From.getNextNode() == &DomPoint;
for (auto *DII : Users) {
// It's common to see a debug user between From and DomPoint. Move it
// after DomPoint to preserve the variable update without any reordering.
- if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
+ if (DomPointAfterFrom && DII->getNextNode() == &DomPoint) {
LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
DII->moveAfter(&DomPoint);
Changed = true;
@@ -2684,7 +2684,7 @@ static bool rewriteDebugUsers(
Instruction *NextNonDebug = MarkedInstr;
// The next instruction might still be a dbg.declare, skip over it.
if (isa<DbgVariableIntrinsic>(NextNonDebug))
- NextNonDebug = NextNonDebug->getNextNonDebugInstruction();
+ NextNonDebug = NextNonDebug->getNextNode();
if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
@@ -3076,9 +3076,9 @@ static bool markAliveBlocks(Function &F,
// If we found a call to a no-return function, insert an unreachable
// instruction after it. Make sure there isn't *already* one there
// though.
- if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) {
+ if (!isa<UnreachableInst>(CI->getNextNode())) {
// Don't insert a call to llvm.trap right before the unreachable.
- changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU);
+ changeToUnreachable(CI->getNextNode(), false, DTU);
Changed = true;
}
break;
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 70afd4133df7c..0052a73acf1b6 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1693,7 +1693,7 @@ void SCEVExpander::replaceCongruentIVInc(
if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
IP = PN->getParent()->getFirstInsertionPt();
else
- IP = OrigInc->getNextNonDebugInstruction()->getIterator();
+ IP = OrigInc->getNextNode()->getIterator();
IRBuilder<> Builder(IP->getParent(), IP);
Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index c3ca22dce0cc4..c68d9249b472a 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -16164,7 +16164,7 @@ void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
// debug location to Front.
Builder.SetInsertPoint(
LastInst->getParent(),
- LastInst->getNextNonDebugInstruction()->getIterator());
+ LastInst->getNextNode()->getIterator());
}
Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}
@@ -18886,7 +18886,7 @@ Value *BoUpSLP::vectorizeTree(
Builder.SetInsertPoint(
IVec->getParent()->getFirstNonPHIOrDbgOrLifetime());
} else if (auto *IVec = dyn_cast<Instruction>(Vec)) {
- Builder.SetInsertPoint(IVec->getNextNonDebugInstruction());
+ Builder.SetInsertPoint(IVec->getNextNode());
}
Vec = Builder.CreateIntCast(
Vec,
@@ -19864,7 +19864,7 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
Instruction *PickedInst = BundleMember->getInst();
if (!Scheduled.insert(PickedInst).second)
continue;
- if (PickedInst->getNextNonDebugInstruction() != LastScheduledInst)
+ if (PickedInst->getNextNode() != LastScheduledInst)
PickedInst->moveAfter(LastScheduledInst->getPrevNode());
LastScheduledInst = PickedInst;
}
@@ -19873,7 +19873,7 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
} else {
auto *SD = cast<ScheduleData>(Picked);
Instruction *PickedInst = SD->getInst();
- if (PickedInst->getNextNonDebugInstruction() != LastScheduledInst)
+ if (PickedInst->getNextNode() != LastScheduledInst)
PickedInst->moveAfter(LastScheduledInst->getPrevNode());
LastScheduledInst = PickedInst;
}
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index be98be260c9dc..558af6900eac3 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -4644,7 +4644,7 @@ TEST_F(OpenMPIRBuilderTest, CreateTeamsWithThreadLimit) {
// Verifying that the next instruction to execute is kmpc_fork_teams
BranchInst *BrInst =
- dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNonDebugInstruction());
+ dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNode());
ASSERT_NE(BrInst, nullptr);
ASSERT_EQ(BrInst->getNumSuccessors(), 1U);
BasicBlock::iterator NextInstruction =
@@ -4701,7 +4701,7 @@ TEST_F(OpenMPIRBuilderTest, CreateTeamsWithNumTeamsUpper) {
// Verifying that the next instruction to execute is kmpc_fork_teams
BranchInst *BrInst =
- dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNonDebugInstruction());
+ dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNode());
ASSERT_NE(BrInst, nullptr);
ASSERT_EQ(BrInst->getNumSuccessors(), 1U);
BasicBlock::iterator NextInstruction =
@@ -4761,7 +4761,7 @@ TEST_F(OpenMPIRBuilderTest, CreateTeamsWithNumTeamsBoth) {
// Verifying that the next instruction to execute is kmpc_fork_teams
BranchInst *BrInst =
- dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNonDebugInstruction());
+ dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNode());
ASSERT_NE(BrInst, nullptr);
ASSERT_EQ(BrInst->getNumSuccessors(), 1U);
BasicBlock::iterator NextInstruction =
@@ -4827,7 +4827,7 @@ TEST_F(OpenMPIRBuilderTest, CreateTeamsWithNumTeamsAndThreadLimit) {
// Verifying that the next instruction to execute is kmpc_fork_teams
BranchInst *BrInst =
- dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNonDebugInstruction());
+ dyn_cast<BranchInst>(PushNumTeamsCallInst->getNextNode());
ASSERT_NE(BrInst, nullptr);
ASSERT_EQ(BrInst->getNumSuccessors(), 1U);
BasicBlock::iterator NextInstruction =
@@ -7338,8 +7338,8 @@ TEST_F(OpenMPIRBuilderTest, CreateTaskIfCondition) {
EXPECT_EQ(TaskBeginIfCall->getParent(),
IfConditionBranchInst->getSuccessor(1));
- EXPECT_EQ(TaskBeginIfCall->getNextNonDebugInstruction(), OulinedFnCall);
- EXPECT_EQ(OulinedFnCall->getNextNonDebugInstruction(), TaskCompleteCall);
+ EXPECT_EQ(TaskBeginIfCall->getNextNode(), OulinedFnCall);
+ EXPECT_EQ(OulinedFnCall->getNextNode(), TaskCompleteCall);
}
TEST_F(OpenMPIRBuilderTest, CreateTaskgroup) {
@@ -7423,12 +7423,12 @@ TEST_F(OpenMPIRBuilderTest, CreateTaskgroup) {
OMPRTL___kmpc_global_thread_num));
// Checking the general structure of the IR generated is same as expected.
- Instruction *GeneratedStoreInst = TaskgroupCall->getNextNonDebugInstruction();
+ Instruction *GeneratedStoreInst = TaskgroupCall->getNextNode();
EXPECT_EQ(GeneratedStoreInst, InternalStoreInst);
Instruction *GeneratedLoad32 =
- GeneratedStoreInst->getNextNonDebugInstruction();
+ GeneratedStoreInst->getNextNode();
EXPECT_EQ(GeneratedLoad32, InternalLoad32);
- Instruction *GeneratedLoad128 = GeneratedLoad32->getNextNonDebugInstruction();
+ Instruction *GeneratedLoad128 = GeneratedLoad32->getNextNode();
EXPECT_EQ(GeneratedLoad128, InternalLoad128);
// Checking the ordering because of the if statements and that
diff --git a/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp b/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp
index b7f5234ffcda1..9bef77433bad5 100644
--- a/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp
+++ b/llvm/unittests/FuzzMutate/RandomIRBuilderTest.cpp
@@ -520,7 +520,7 @@ TEST(RandomIRBuilderTest, sinkToIntrinsic) {
ASSERT_TRUE(Modified);
Modified = false;
- I = I->getNextNonDebugInstruction();
+ I = I->getNextNode();
for (int i = 0; i < 20; i++) {
Value *OldOperand = I->getOperand(0);
Value *Src = F.getArg(5);
diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp
index 126db4d4c1625..fd8838e06f34c 100644
--- a/llvm/unittests/IR/InstructionsTest.cpp
+++ b/llvm/unittests/IR/InstructionsTest.cpp
@@ -1482,11 +1482,11 @@ TEST(InstructionsTest, SkipDebug) {
// The first non-debug instruction is the terminator.
auto *Term = BB.getTerminator();
- EXPECT_EQ(Term, BB.begin()->getNextNonDebugInstruction());
+ EXPECT_EQ(Term, BB.begin()->getNextNode());
EXPECT_EQ(Term->getIterator(), skipDebugIntrinsics(BB.begin()));
// After the terminator, there are no non-debug instructions.
- EXPECT_EQ(nullptr, Term->getNextNonDebugInstruction());
+ EXPECT_EQ(nullptr, Term->getNextNode());
}
TEST(InstructionsTest, PhiMightNotBeFPMathOperator) {
diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp
index 3c7a892c9d65a..b922216ef8893 100644
--- a/llvm/unittests/Transforms/Utils/LocalTest.cpp
+++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp
@@ -823,13 +823,13 @@ TEST(Local, ReplaceAllDbgUsesWith) {
BasicBlock &BB = F.front();
Instruction &A = BB.front();
- Instruction &B = *A.getNextNonDebugInstruction();
- Instruction &C = *B.getNextNonDebugInstruction();
- Instruction &D = *C.getNextNonDebugInstruction();
- Instruction &E = *D.getNextNonDebugInstruction();
- Instruction &F_ = *E.getNextNonDebugInstruction();
- Instruction &Barrier = *F_.getNextNonDebugInstruction();
- Instruction &G = *Barrier.getNextNonDebugInstruction();
+ Instruction &B = *A.getNextNode();
+ Instruction &C = *B.getNextNode();
+ Instruction &D = *C.getNextNode();
+ Instruction &E = *D.getNextNode();
+ Instruction &F_ = *E.getNextNode();
+ Instruction &Barrier = *F_.getNextNode();
+ Instruction &G = *Barrier.getNextNode();
// Simulate i32 <-> i64* conversion. Expect no updates: the datalayout says
// pointers are 64 bits, so the conversion would be lossy.