[llvm] [llvm] Replace SmallSet with SmallPtrSet (NFC) (PR #154068)
Kazu Hirata via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 17 23:44:25 PDT 2025
https://github.com/kazutakahirata created https://github.com/llvm/llvm-project/pull/154068
This patch replaces SmallSet<T *, N> with SmallPtrSet<T *, N>. Note
that SmallSet.h "redirects" SmallSet to SmallPtrSet for pointer
element types:
template <typename PointeeType, unsigned N>
class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
We only have 140 instances that rely on this "redirection", with the
vast majority of them under llvm/. Since relying on the redirection
doesn't improve readability, this patch replaces SmallSet with
SmallPtrSet for pointer element types.
From 9016c5421ebb5ecfbaa043629172cb0eacf2ac9c Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Sun, 17 Aug 2025 23:06:54 -0700
Subject: [PATCH] [llvm] Replace SmallSet with SmallPtrSet (NFC)
This patch replaces SmallSet<T *, N> with SmallPtrSet<T *, N>. Note
that SmallSet.h "redirects" SmallSet to SmallPtrSet for pointer
element types:
template <typename PointeeType, unsigned N>
class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
We only have 140 instances that rely on this "redirection", with the
vast majority of them under llvm/. Since relying on the redirection
doesn't improve readability, this patch replaces SmallSet with
SmallPtrSet for pointer element types.
---
.../llvm/Analysis/GenericDomTreeUpdaterImpl.h | 2 +-
.../llvm/CodeGen/GlobalISel/LoadStoreOpt.h | 2 +-
llvm/include/llvm/CodeGen/MachinePipeliner.h | 2 +-
llvm/include/llvm/CodeGen/ScheduleDAG.h | 2 +-
llvm/lib/Analysis/CallPrinter.cpp | 4 +--
llvm/lib/Analysis/CaptureTracking.cpp | 2 +-
llvm/lib/Analysis/ScalarEvolution.cpp | 2 +-
llvm/lib/Analysis/ValueTracking.cpp | 6 ++--
llvm/lib/CodeGen/CodeGenPrepare.cpp | 15 ++++-----
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 2 +-
llvm/lib/CodeGen/MachineCopyPropagation.cpp | 2 +-
llvm/lib/CodeGen/MachineDebugify.cpp | 2 +-
llvm/lib/CodeGen/MachinePipeliner.cpp | 6 ++--
llvm/lib/CodeGen/MacroFusion.cpp | 2 +-
.../SelectionDAG/SelectionDAGBuilder.cpp | 2 +-
llvm/lib/CodeGen/SwiftErrorValueTracking.cpp | 2 +-
.../Orc/Debugging/DebuggerSupportPlugin.cpp | 2 +-
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 2 +-
llvm/lib/IR/AutoUpgrade.cpp | 2 +-
llvm/lib/IR/Verifier.cpp | 4 +--
llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp | 2 +-
.../Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp | 4 +--
.../Target/AMDGPU/AMDGPUSetWavePriority.cpp | 2 +-
llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 4 +--
llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp | 4 +--
llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 2 +-
.../ARM/MVETPAndVPTOptimisationsPass.cpp | 2 +-
.../Target/CSKY/CSKYConstantIslandPass.cpp | 2 +-
llvm/lib/Target/Hexagon/HexagonGenInsert.cpp | 2 +-
.../Hexagon/HexagonLoopIdiomRecognition.cpp | 2 +-
llvm/lib/Target/Hexagon/HexagonSubtarget.cpp | 10 +++---
llvm/lib/Target/Hexagon/HexagonSubtarget.h | 3 +-
.../Target/Mips/MipsConstantIslandPass.cpp | 2 +-
llvm/lib/Target/PowerPC/PPCCTRLoopsVerify.cpp | 2 +-
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 4 +--
.../Target/PowerPC/PPCLoopInstrFormPrep.cpp | 17 +++++-----
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
.../X86LoadValueInjectionLoadHardening.cpp | 2 +-
llvm/lib/Target/X86/X86PreTileConfig.cpp | 2 +-
llvm/lib/Transforms/IPO/FunctionAttrs.cpp | 32 +++++++++----------
.../Transforms/Scalar/DFAJumpThreading.cpp | 12 +++----
llvm/lib/Transforms/Scalar/GVN.cpp | 4 +--
llvm/lib/Transforms/Scalar/GuardWidening.cpp | 4 +--
llvm/lib/Transforms/Scalar/IndVarSimplify.cpp | 2 +-
.../Scalar/LowerMatrixIntrinsics.cpp | 2 +-
.../lib/Transforms/Scalar/MemCpyOptimizer.cpp | 4 +--
llvm/lib/Transforms/Scalar/Reassociate.cpp | 2 +-
llvm/lib/Transforms/Scalar/StructurizeCFG.cpp | 8 ++---
.../Utils/CanonicalizeFreezeInLoops.cpp | 2 +-
.../lib/Transforms/Utils/ControlFlowUtils.cpp | 2 +-
llvm/lib/Transforms/Utils/Local.cpp | 6 ++--
.../Utils/PromoteMemoryToRegister.cpp | 22 ++++++-------
.../Utils/ScalarEvolutionExpander.cpp | 2 +-
.../Transforms/Vectorize/LoopVectorize.cpp | 2 +-
.../Transforms/Vectorize/SLPVectorizer.cpp | 2 +-
55 files changed, 120 insertions(+), 123 deletions(-)
diff --git a/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h b/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h
index 896b68c5021b3..6bfad783b529b 100644
--- a/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h
+++ b/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h
@@ -383,7 +383,7 @@ void GenericDomTreeUpdater<DerivedT, DomTreeT, PostDomTreeT>::
// field of all the elements of Edges.
// I.e., forall elt in Edges, it exists BB in NewBBs
// such as BB == elt.NewBB.
- SmallSet<BasicBlockT *, 32> NewBBs;
+ SmallPtrSet<BasicBlockT *, 32> NewBBs;
for (auto &Edge : Edges)
NewBBs.insert(Edge.NewBB);
// For each element in Edges, remember whether or not element
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h b/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
index cee779a5fd5d1..4b7506e013762 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
@@ -162,7 +162,7 @@ class LLVM_ABI LoadStoreOpt : public MachineFunctionPass {
DenseMap<unsigned, BitVector> LegalStoreSizes;
bool IsPreLegalizer = false;
/// Contains instructions to be erased at the end of a block scan.
- SmallSet<MachineInstr *, 16> InstsToErase;
+ SmallPtrSet<MachineInstr *, 16> InstsToErase;
public:
LoadStoreOpt();
diff --git a/llvm/include/llvm/CodeGen/MachinePipeliner.h b/llvm/include/llvm/CodeGen/MachinePipeliner.h
index e50443d25cc60..c90ff4f3daa47 100644
--- a/llvm/include/llvm/CodeGen/MachinePipeliner.h
+++ b/llvm/include/llvm/CodeGen/MachinePipeliner.h
@@ -830,7 +830,7 @@ class SMSchedule {
return ScheduledInstrs[cycle];
}
- SmallSet<SUnit *, 8>
+ SmallPtrSet<SUnit *, 8>
computeUnpipelineableNodes(SwingSchedulerDAG *SSD,
TargetInstrInfo::PipelinerLoopInfo *PLI);
diff --git a/llvm/include/llvm/CodeGen/ScheduleDAG.h b/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 122b7be96b46a..aee1514581485 100644
--- a/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -237,7 +237,7 @@ class TargetRegisterInfo;
};
/// Keep record of which SUnit are in the same cluster group.
- typedef SmallSet<SUnit *, 8> ClusterInfo;
+ typedef SmallPtrSet<SUnit *, 8> ClusterInfo;
constexpr unsigned InvalidClusterId = ~0u;
/// Return whether the input cluster ID's are the same and valid.
diff --git a/llvm/lib/Analysis/CallPrinter.cpp b/llvm/lib/Analysis/CallPrinter.cpp
index 672dae1642cb3..99d8b11f0c4ba 100644
--- a/llvm/lib/Analysis/CallPrinter.cpp
+++ b/llvm/lib/Analysis/CallPrinter.cpp
@@ -70,7 +70,7 @@ class CallGraphDOTInfo {
for (Function &F : M->getFunctionList()) {
uint64_t localSumFreq = 0;
- SmallSet<Function *, 16> Callers;
+ SmallPtrSet<Function *, 16> Callers;
for (User *U : F.users())
if (isa<CallInst>(U))
Callers.insert(cast<Instruction>(U)->getFunction());
@@ -99,7 +99,7 @@ class CallGraphDOTInfo {
bool FoundParallelEdge = true;
while (FoundParallelEdge) {
- SmallSet<Function *, 16> Visited;
+ SmallPtrSet<Function *, 16> Visited;
FoundParallelEdge = false;
for (auto CI = Node->begin(), CE = Node->end(); CI != CE; CI++) {
if (!(Visited.insert(CI->second->getFunction())).second) {
diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp
index bd0d417b1ed33..b6acda3a9f259 100644
--- a/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/llvm/lib/Analysis/CaptureTracking.cpp
@@ -405,7 +405,7 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
SmallVector<const Use *, 20> Worklist;
Worklist.reserve(getDefaultMaxUsesToExploreForCaptureTracking());
- SmallSet<const Use *, 20> Visited;
+ SmallPtrSet<const Use *, 20> Visited;
auto AddUses = [&](const Value *V) {
for (const Use &U : V->uses()) {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index ce4d4ad7a0ab0..d2c445f1ffaa0 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -7284,7 +7284,7 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
bool &Precise) {
Precise = true;
// Do a bounded search of the def relation of the requested SCEVs.
- SmallSet<const SCEV *, 16> Visited;
+ SmallPtrSet<const SCEV *, 16> Visited;
SmallVector<const SCEV *> Worklist;
auto pushOp = [&](const SCEV *S) {
if (!Visited.insert(S).second)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index b0e4b009f3501..50e43a53def6c 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -7785,7 +7785,7 @@ bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
// The set of all recursive users we've visited (which are assumed to all be
// poison because of said visit)
- SmallSet<const Value *, 16> KnownPoison;
+ SmallPtrSet<const Value *, 16> KnownPoison;
SmallVector<const Instruction*, 16> Worklist;
Worklist.push_back(Root);
while (!Worklist.empty()) {
@@ -8140,8 +8140,8 @@ static bool programUndefinedIfUndefOrPoison(const Value *V,
// Set of instructions that we have proved will yield poison if Inst
// does.
- SmallSet<const Value *, 16> YieldsPoison;
- SmallSet<const BasicBlock *, 4> Visited;
+ SmallPtrSet<const Value *, 16> YieldsPoison;
+ SmallPtrSet<const BasicBlock *, 4> Visited;
YieldsPoison.insert(V);
Visited.insert(BB);
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 9223739fc0098..0e40a92fd8d64 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -377,7 +377,7 @@ class CodeGenPrepare {
/// to be optimized again.
/// Note: Consider building time in this pass, when a BB updated, we need
/// to insert such BB into FreshBBs for huge function.
- SmallSet<BasicBlock *, 32> FreshBBs;
+ SmallPtrSet<BasicBlock *, 32> FreshBBs;
void releaseMemory() {
// Clear per function information.
@@ -1105,7 +1105,7 @@ bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
/// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static void replaceAllUsesWith(Value *Old, Value *New,
- SmallSet<BasicBlock *, 32> &FreshBBs,
+ SmallPtrSet<BasicBlock *, 32> &FreshBBs,
bool IsHuge) {
auto *OldI = dyn_cast<Instruction>(Old);
if (OldI) {
@@ -2135,7 +2135,7 @@ static bool isRemOfLoopIncrementWithLoopInvariant(
// Rem = rem == RemAmtLoopInvariant ? 0 : Rem;
static bool foldURemOfLoopIncrement(Instruction *Rem, const DataLayout *DL,
const LoopInfo *LI,
- SmallSet<BasicBlock *, 32> &FreshBBs,
+ SmallPtrSet<BasicBlock *, 32> &FreshBBs,
bool IsHuge) {
Value *AddOffset, *RemAmt, *AddInst;
PHINode *LoopIncrPN;
@@ -2534,11 +2534,10 @@ static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
/// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
///
/// If the transform is performed, return true and set ModifiedDT to true.
-static bool despeculateCountZeros(IntrinsicInst *CountZeros,
- LoopInfo &LI,
+static bool despeculateCountZeros(IntrinsicInst *CountZeros, LoopInfo &LI,
const TargetLowering *TLI,
const DataLayout *DL, ModifyDT &ModifiedDT,
- SmallSet<BasicBlock *, 32> &FreshBBs,
+ SmallPtrSet<BasicBlock *, 32> &FreshBBs,
bool IsHugeFunc) {
// If a zero input is undefined, it doesn't make sense to despeculate that.
if (match(CountZeros->getOperand(1), m_One()))
@@ -4351,7 +4350,7 @@ class AddressingModeCombiner {
PhiNodeSet &PhiNodesToMatch) {
SmallVector<PHIPair, 8> WorkList;
Matcher.insert({PHI, Candidate});
- SmallSet<PHINode *, 8> MatchedPHIs;
+ SmallPtrSet<PHINode *, 8> MatchedPHIs;
MatchedPHIs.insert(PHI);
WorkList.push_back({PHI, Candidate});
SmallSet<PHIPair, 8> Visited;
@@ -8635,7 +8634,7 @@ static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
}
static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
- SmallSet<BasicBlock *, 32> &FreshBBs,
+ SmallPtrSet<BasicBlock *, 32> &FreshBBs,
bool IsHugeFunc) {
// Try and convert
// %c = icmp ult %x, 8
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 64c19fab1a023..7ca02ad756f51 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3517,7 +3517,7 @@ void IRTranslator::finishPendingPhis() {
Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG
- SmallSet<const MachineBasicBlock *, 16> SeenPreds;
+ SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
auto IRPred = PI->getIncomingBlock(i);
ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index 742de1101faa2..e35983138550f 100644
--- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -490,7 +490,7 @@ class MachineCopyPropagation {
SmallSetVector<MachineInstr *, 8> MaybeDeadCopies;
/// Multimap tracking debug users in current BB
- DenseMap<MachineInstr *, SmallSet<MachineInstr *, 2>> CopyDbgUsers;
+ DenseMap<MachineInstr *, SmallPtrSet<MachineInstr *, 2>> CopyDbgUsers;
CopyTracker Tracker;
diff --git a/llvm/lib/CodeGen/MachineDebugify.cpp b/llvm/lib/CodeGen/MachineDebugify.cpp
index 1a20fe586e951..307f49468eb39 100644
--- a/llvm/lib/CodeGen/MachineDebugify.cpp
+++ b/llvm/lib/CodeGen/MachineDebugify.cpp
@@ -87,7 +87,7 @@ bool applyDebugifyMetadataToMachineFunction(MachineModuleInfo &MMI,
// Do this by introducing debug uses of each register definition. If that is
// not possible (e.g. we have a phi or a meta instruction), emit a constant.
uint64_t NextImm = 0;
- SmallSet<DILocalVariable *, 16> VarSet;
+ SmallPtrSet<DILocalVariable *, 16> VarSet;
const MCInstrDesc &DbgValDesc = TII.get(TargetOpcode::DBG_VALUE);
for (MachineBasicBlock &MBB : MF) {
MachineBasicBlock::iterator FirstNonPHIIt = MBB.getFirstNonPHI();
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 90005bd181f3a..3a9651c5cee04 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -3466,9 +3466,9 @@ bool SMSchedule::onlyHasLoopCarriedOutputOrOrderPreds(
}
/// Determine transitive dependences of unpipelineable instructions
-SmallSet<SUnit *, 8> SMSchedule::computeUnpipelineableNodes(
+SmallPtrSet<SUnit *, 8> SMSchedule::computeUnpipelineableNodes(
SwingSchedulerDAG *SSD, TargetInstrInfo::PipelinerLoopInfo *PLI) {
- SmallSet<SUnit *, 8> DoNotPipeline;
+ SmallPtrSet<SUnit *, 8> DoNotPipeline;
SmallVector<SUnit *, 8> Worklist;
for (auto &SU : SSD->SUnits)
@@ -3498,7 +3498,7 @@ SmallSet<SUnit *, 8> SMSchedule::computeUnpipelineableNodes(
// and ensure that they are in stage 0. If unable to do so, return false.
bool SMSchedule::normalizeNonPipelinedInstructions(
SwingSchedulerDAG *SSD, TargetInstrInfo::PipelinerLoopInfo *PLI) {
- SmallSet<SUnit *, 8> DNP = computeUnpipelineableNodes(SSD, PLI);
+ SmallPtrSet<SUnit *, 8> DNP = computeUnpipelineableNodes(SSD, PLI);
int NewLastCycle = INT_MIN;
for (SUnit &SU : SSD->SUnits) {
diff --git a/llvm/lib/CodeGen/MacroFusion.cpp b/llvm/lib/CodeGen/MacroFusion.cpp
index 975a3fe71abad..1db53017e6cef 100644
--- a/llvm/lib/CodeGen/MacroFusion.cpp
+++ b/llvm/lib/CodeGen/MacroFusion.cpp
@@ -79,7 +79,7 @@ bool llvm::fuseInstructionPair(ScheduleDAGInstrs &DAG, SUnit &FirstSU,
FirstSU.ParentClusterIdx = Clusters.size();
SecondSU.ParentClusterIdx = Clusters.size();
- SmallSet<SUnit *, 8> Cluster{{&FirstSU, &SecondSU}};
+ SmallPtrSet<SUnit *, 8> Cluster{{&FirstSU, &SecondSU}};
Clusters.push_back(Cluster);
// TODO - If we want to chain more than two instructions, we need to create
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2a1ef2b980ac4..2565339c5de16 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3576,7 +3576,7 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
// Update machine-CFG edges with unique successors.
- SmallSet<BasicBlock*, 32> Done;
+ SmallPtrSet<BasicBlock *, 32> Done;
for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
BasicBlock *BB = I.getSuccessor(i);
bool Inserted = Done.insert(BB).second;
diff --git a/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp b/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
index decffdc7dfe45..ff4b568b5ee20 100644
--- a/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
+++ b/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
@@ -179,7 +179,7 @@ void SwiftErrorValueTracking::propagateVRegs() {
// Check whether we have a single vreg def from all predecessors.
// Otherwise we need a phi.
SmallVector<std::pair<MachineBasicBlock *, Register>, 4> VRegs;
- SmallSet<const MachineBasicBlock *, 8> Visited;
+ SmallPtrSet<const MachineBasicBlock *, 8> Visited;
for (auto *Pred : MBB->predecessors()) {
if (!Visited.insert(Pred).second)
continue;
diff --git a/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp b/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
index 1bafed79d6968..ba27aa87b7c7a 100644
--- a/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
@@ -64,7 +64,7 @@ class MachODebugObjectSynthesizerBase
LLVM_DEBUG({
dbgs() << " Preserving debug section " << Sec.getName() << "\n";
});
- SmallSet<Block *, 8> PreservedBlocks;
+ SmallPtrSet<Block *, 8> PreservedBlocks;
for (auto *Sym : Sec.symbols()) {
bool NewPreservedBlock =
PreservedBlocks.insert(&Sym->getBlock()).second;
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index c16b0dde1a3da..e9147a42452d0 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -5930,7 +5930,7 @@ void OpenMPIRBuilder::applySimd(CanonicalLoopInfo *CanonicalLoop,
createIfVersion(CanonicalLoop, IfCond, VMap, LIA, LI, L, "simd");
}
- SmallSet<BasicBlock *, 8> Reachable;
+ SmallPtrSet<BasicBlock *, 8> Reachable;
// Get the basic blocks from the loop in which memref instructions
// can be found.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index b91fd70bd9467..e200f3626e69d 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -5391,7 +5391,7 @@ void llvm::UpgradeNVVMAnnotations(Module &M) {
return;
SmallVector<MDNode *, 8> NewNodes;
- SmallSet<const MDNode *, 8> SeenNodes;
+ SmallPtrSet<const MDNode *, 8> SeenNodes;
for (MDNode *MD : NamedMD->operands()) {
if (!SeenNodes.insert(MD).second)
continue;
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 5a93228faa3ac..9d9b51db98702 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4636,7 +4636,7 @@ void Verifier::visitEHPadPredecessors(Instruction &I) {
}
// The edge may exit from zero or more nested pads.
- SmallSet<Value *, 8> Seen;
+ SmallPtrSet<Value *, 8> Seen;
for (;; FromPad = getParentPad(FromPad)) {
Check(FromPad != ToPad,
"EH pad cannot handle exceptions raised within it", FromPad, TI);
@@ -4764,7 +4764,7 @@ void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
User *FirstUser = nullptr;
Value *FirstUnwindPad = nullptr;
SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
- SmallSet<FuncletPadInst *, 8> Seen;
+ SmallPtrSet<FuncletPadInst *, 8> Seen;
while (!Worklist.empty()) {
FuncletPadInst *CurrentPad = Worklist.pop_back_val();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
index e65dd1b04cc48..dfe7c53aaca06 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
@@ -384,7 +384,7 @@ bool isClobberedInFunction(const LoadInst *Load, MemorySSA *MSSA,
AAResults *AA) {
MemorySSAWalker *Walker = MSSA->getWalker();
SmallVector<MemoryAccess *> WorkList{Walker->getClobberingMemoryAccess(Load)};
- SmallSet<MemoryAccess *, 8> Visited;
+ SmallPtrSet<MemoryAccess *, 8> Visited;
MemoryLocation Loc(MemoryLocation::get(Load));
LLVM_DEBUG(dbgs() << "Checking clobbering of: " << *Load << '\n');
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
index 3a3751892c8b6..28d5400fd1807 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
@@ -134,8 +134,8 @@ static std::pair<const Value *, const Type *> getMemoryInstrPtrAndType(
bool AMDGPUPerfHint::isIndirectAccess(const Instruction *Inst) const {
LLVM_DEBUG(dbgs() << "[isIndirectAccess] " << *Inst << '\n');
- SmallSet<const Value *, 32> WorkSet;
- SmallSet<const Value *, 32> Visited;
+ SmallPtrSet<const Value *, 32> WorkSet;
+ SmallPtrSet<const Value *, 32> Visited;
if (const Value *MO = getMemoryInstrPtrAndType(Inst).first) {
if (isGlobalAddr(MO))
WorkSet.insert(MO);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp
index b60ded33a4ac3..56aa3f6db83ad 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp
@@ -195,7 +195,7 @@ bool AMDGPUSetWavePriority::run(MachineFunction &MF) {
// Lower the priority on edges where control leaves blocks from which
// the VMEM loads are reachable.
- SmallSet<MachineBasicBlock *, 16> PriorityLoweringBlocks;
+ SmallPtrSet<MachineBasicBlock *, 16> PriorityLoweringBlocks;
for (MachineBasicBlock &MBB : MF) {
if (MBBInfos[&MBB].MayReachVMEMLoad) {
if (MBB.succ_empty())
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index f018f77bc83e1..dce4e6f993005 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -460,7 +460,7 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
// List of clobbering instructions.
SmallVector<MachineInstr*, 8> Clobbers;
// List of instructions marked for deletion.
- SmallSet<MachineInstr*, 8> MergedInstrs;
+ SmallPtrSet<MachineInstr *, 8> MergedInstrs;
bool Changed = false;
@@ -808,7 +808,7 @@ bool SIFixSGPRCopies::run(MachineFunction &MF) {
void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
bool AllAGPRUses = true;
SetVector<const MachineInstr *> worklist;
- SmallSet<const MachineInstr *, 4> Visited;
+ SmallPtrSet<const MachineInstr *, 4> Visited;
SetVector<MachineInstr *> PHIOperands;
worklist.insert(&MI);
Visited.insert(&MI);
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index f7a9a584a6b51..e97536d36bab2 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -81,7 +81,7 @@ class SILowerControlFlow {
MachineRegisterInfo *MRI = nullptr;
SetVector<MachineInstr*> LoweredEndCf;
DenseSet<Register> LoweredIf;
- SmallSet<MachineBasicBlock *, 4> KillBlocks;
+ SmallPtrSet<MachineBasicBlock *, 4> KillBlocks;
SmallSet<Register, 8> RecomputeRegs;
const TargetRegisterClass *BoolRC = nullptr;
@@ -460,7 +460,7 @@ MachineBasicBlock::iterator
SILowerControlFlow::skipIgnoreExecInstsTrivialSucc(
MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
- SmallSet<const MachineBasicBlock *, 4> Visited;
+ SmallPtrSet<const MachineBasicBlock *, 4> Visited;
MachineBasicBlock *B = &MBB;
do {
if (!Visited.insert(B).second)
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index ef690838f0f3b..c53e2158f4c73 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -109,7 +109,7 @@ namespace {
/// NewWaterList - The subset of WaterList that was created since the
/// previous iteration by inserting unconditional branches.
- SmallSet<MachineBasicBlock*, 4> NewWaterList;
+ SmallPtrSet<MachineBasicBlock *, 4> NewWaterList;
using water_iterator = std::vector<MachineBasicBlock *>::iterator;
diff --git a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
index 0b4e7dfebe369..5eeb4fe995485 100644
--- a/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
+++ b/llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp
@@ -922,7 +922,7 @@ bool MVETPAndVPTOptimisations::ReplaceConstByVPNOTs(MachineBasicBlock &MBB,
// the function.
unsigned LastVPTImm = 0;
Register LastVPTReg = 0;
- SmallSet<MachineInstr *, 4> DeadInstructions;
+ SmallPtrSet<MachineInstr *, 4> DeadInstructions;
for (MachineInstr &Instr : MBB.instrs()) {
// Look for predicated MVE instructions.
diff --git a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
index e55d9b227d1cd..7885d93cbad98 100644
--- a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
+++ b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
@@ -116,7 +116,7 @@ class CSKYConstantIslands : public MachineFunctionPass {
/// NewWaterList - The subset of WaterList that was created since the
/// previous iteration by inserting unconditional branches.
- SmallSet<MachineBasicBlock *, 4> NewWaterList;
+ SmallPtrSet<MachineBasicBlock *, 4> NewWaterList;
using water_iterator = std::vector<MachineBasicBlock *>::iterator;
diff --git a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
index a9201460d8e2e..b2218abcaaa3c 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenInsert.cpp
@@ -1273,7 +1273,7 @@ void HexagonGenInsert::selectCandidates() {
for (unsigned R = AllRMs.find_first(); R; R = AllRMs.find_next(R)) {
using use_iterator = MachineRegisterInfo::use_nodbg_iterator;
- using InstrSet = SmallSet<const MachineInstr *, 16>;
+ using InstrSet = SmallPtrSet<const MachineInstr *, 16>;
InstrSet UIs;
// Count as the number of instructions in which R is used, not the
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index c34eecd3fcb09..a3717bb97d14b 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -2289,7 +2289,7 @@ bool HexagonLoopIdiomRecognize::processCopyingStore(Loop *CurLoop,
// the instructions in Insts are removed.
bool HexagonLoopIdiomRecognize::coverLoop(Loop *L,
SmallVectorImpl<Instruction*> &Insts) const {
- SmallSet<BasicBlock*,8> LoopBlocks;
+ SmallPtrSet<BasicBlock *, 8> LoopBlocks;
LoopBlocks.insert_range(L->blocks());
SetVector<Instruction *> Worklist(llvm::from_range, Insts);
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index ecc1b5d2ebe35..6a05b5ab2c21c 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -445,8 +445,8 @@ void HexagonSubtarget::adjustSchedDependency(
const HexagonInstrInfo *QII = getInstrInfo();
// Instructions with .new operands have zero latency.
- SmallSet<SUnit *, 4> ExclSrc;
- SmallSet<SUnit *, 4> ExclDst;
+ SmallPtrSet<SUnit *, 4> ExclSrc;
+ SmallPtrSet<SUnit *, 4> ExclDst;
if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
isBestZeroLatency(Src, Dst, QII, ExclSrc, ExclDst)) {
Dep.setLatency(0);
@@ -630,9 +630,9 @@ static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
// together with a zero latency. Only one dependence should have a zero
// latency. If there are multiple choices, choose the best, and change
// the others, if needed.
-bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
- const HexagonInstrInfo *TII, SmallSet<SUnit*, 4> &ExclSrc,
- SmallSet<SUnit*, 4> &ExclDst) const {
+bool HexagonSubtarget::isBestZeroLatency(
+ SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII,
+ SmallPtrSet<SUnit *, 4> &ExclSrc, SmallPtrSet<SUnit *, 4> &ExclDst) const {
MachineInstr &SrcInst = *Src->getInstr();
MachineInstr &DstInst = *Dst->getInstr();
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 41555db4ac662..b111471a9696c 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -366,7 +366,8 @@ class HexagonSubtarget : public HexagonGenSubtargetInfo {
void restoreLatency(SUnit *Src, SUnit *Dst) const;
void changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat) const;
bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII,
- SmallSet<SUnit*, 4> &ExclSrc, SmallSet<SUnit*, 4> &ExclDst) const;
+ SmallPtrSet<SUnit *, 4> &ExclSrc,
+ SmallPtrSet<SUnit *, 4> &ExclDst) const;
};
} // end namespace llvm
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
index 8067dbc54170b..2a2ccf7d43b8e 100644
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -232,7 +232,7 @@ namespace {
/// NewWaterList - The subset of WaterList that was created since the
/// previous iteration by inserting unconditional branches.
- SmallSet<MachineBasicBlock*, 4> NewWaterList;
+ SmallPtrSet<MachineBasicBlock *, 4> NewWaterList;
using water_iterator = std::vector<MachineBasicBlock *>::iterator;
diff --git a/llvm/lib/Target/PowerPC/PPCCTRLoopsVerify.cpp b/llvm/lib/Target/PowerPC/PPCCTRLoopsVerify.cpp
index 46aa27e1450a6..c8e576f976f67 100644
--- a/llvm/lib/Target/PowerPC/PPCCTRLoopsVerify.cpp
+++ b/llvm/lib/Target/PowerPC/PPCCTRLoopsVerify.cpp
@@ -93,7 +93,7 @@ static bool clobbersCTR(const MachineInstr &MI) {
static bool verifyCTRBranch(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I) {
MachineBasicBlock::iterator BI = I;
- SmallSet<MachineBasicBlock *, 16> Visited;
+ SmallPtrSet<MachineBasicBlock *, 16> Visited;
SmallVector<MachineBasicBlock *, 8> Preds;
bool CheckPreds;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index b97d0e235c019..652edd4e04c60 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14814,9 +14814,9 @@ static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
SDValue Chain = LD->getChain();
EVT VT = LD->getMemoryVT();
- SmallSet<SDNode *, 16> LoadRoots;
+ SmallPtrSet<SDNode *, 16> LoadRoots;
SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
- SmallSet<SDNode *, 16> Visited;
+ SmallPtrSet<SDNode *, 16> Visited;
// First, search up the chain, branching to follow all token-factor operands.
// If we find a consecutive load, then we're done, otherwise, record all
diff --git a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
index 709d7e7e9b47a..adf9436b34ccf 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
@@ -264,9 +264,8 @@ namespace {
bool prepareBasesForCommoningChains(Bucket &BucketChain);
/// Rewrite load/store according to the common chains.
- bool
- rewriteLoadStoresForCommoningChains(Loop *L, Bucket &Bucket,
- SmallSet<BasicBlock *, 16> &BBChanged);
+ bool rewriteLoadStoresForCommoningChains(
+ Loop *L, Bucket &Bucket, SmallPtrSet<BasicBlock *, 16> &BBChanged);
/// Collect condition matched(\p isValidCandidate() returns true)
/// candidates in Loop \p L.
@@ -309,7 +308,7 @@ namespace {
/// Rewrite load/store instructions in \p BucketChain according to
/// preparation.
bool rewriteLoadStores(Loop *L, Bucket &BucketChain,
- SmallSet<BasicBlock *, 16> &BBChanged,
+ SmallPtrSet<BasicBlock *, 16> &BBChanged,
PrepForm Form);
/// Rewrite for the base load/store of a chain.
@@ -523,7 +522,7 @@ bool PPCLoopInstrFormPrep::chainCommoning(Loop *L,
if (Buckets.empty())
return MadeChange;
- SmallSet<BasicBlock *, 16> BBChanged;
+ SmallPtrSet<BasicBlock *, 16> BBChanged;
for (auto &Bucket : Buckets) {
if (prepareBasesForCommoningChains(Bucket))
@@ -537,7 +536,7 @@ bool PPCLoopInstrFormPrep::chainCommoning(Loop *L,
}
bool PPCLoopInstrFormPrep::rewriteLoadStoresForCommoningChains(
- Loop *L, Bucket &Bucket, SmallSet<BasicBlock *, 16> &BBChanged) {
+ Loop *L, Bucket &Bucket, SmallPtrSet<BasicBlock *, 16> &BBChanged) {
bool MadeChange = false;
assert(Bucket.Elements.size() ==
@@ -1006,7 +1005,7 @@ bool PPCLoopInstrFormPrep::prepareBaseForUpdateFormChain(Bucket &BucketChain) {
}
bool PPCLoopInstrFormPrep::rewriteLoadStores(
- Loop *L, Bucket &BucketChain, SmallSet<BasicBlock *, 16> &BBChanged,
+ Loop *L, Bucket &BucketChain, SmallPtrSet<BasicBlock *, 16> &BBChanged,
PrepForm Form) {
bool MadeChange = false;
@@ -1089,7 +1088,7 @@ bool PPCLoopInstrFormPrep::updateFormPrep(Loop *L,
bool MadeChange = false;
if (Buckets.empty())
return MadeChange;
- SmallSet<BasicBlock *, 16> BBChanged;
+ SmallPtrSet<BasicBlock *, 16> BBChanged;
for (auto &Bucket : Buckets)
// The base address of each bucket is transformed into a phi and the others
// are rewritten based on new base.
@@ -1110,7 +1109,7 @@ bool PPCLoopInstrFormPrep::dispFormPrep(Loop *L,
if (Buckets.empty())
return MadeChange;
- SmallSet<BasicBlock *, 16> BBChanged;
+ SmallPtrSet<BasicBlock *, 16> BBChanged;
for (auto &Bucket : Buckets) {
if (Bucket.Elements.size() < DispFormPrepMinThreshold)
continue;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index aedba7e52e3ab..ce03818b49502 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17525,7 +17525,7 @@ static SDValue combineOp_VLToVWOp_VL(SDNode *N,
return SDValue();
SmallVector<SDNode *> Worklist;
- SmallSet<SDNode *, 8> Inserted;
+ SmallPtrSet<SDNode *, 8> Inserted;
Worklist.push_back(N);
Inserted.insert(N);
SmallVector<CombineResult> CombinesToApply;
diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
index cf055cf3be0aa..090060eaa65e1 100644
--- a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
@@ -491,7 +491,7 @@ X86LoadValueInjectionLoadHardeningPass::getGadgetGraph(
NumGadgets += GadgetCount;
// Traverse CFG to build the rest of the graph
- SmallSet<MachineBasicBlock *, 8> BlocksVisited;
+ SmallPtrSet<MachineBasicBlock *, 8> BlocksVisited;
std::function<void(MachineBasicBlock *, GraphIter, unsigned)> TraverseCFG =
[&](MachineBasicBlock *MBB, GraphIter GI, unsigned ParentDepth) {
unsigned LoopDepth = MLI.getLoopDepth(MBB);
diff --git a/llvm/lib/Target/X86/X86PreTileConfig.cpp b/llvm/lib/Target/X86/X86PreTileConfig.cpp
index 3b4e531f25388..2a1c49957bf7a 100644
--- a/llvm/lib/Target/X86/X86PreTileConfig.cpp
+++ b/llvm/lib/Target/X86/X86PreTileConfig.cpp
@@ -100,7 +100,7 @@ struct BBInfo {
class X86PreTileConfig : public MachineFunctionPass {
MachineRegisterInfo *MRI = nullptr;
const MachineLoopInfo *MLI = nullptr;
- SmallSet<MachineInstr *, 8> DefVisited;
+ SmallPtrSet<MachineInstr *, 8> DefVisited;
DenseMap<MachineBasicBlock *, BBInfo> BBVisitedInfo;
DenseMap<MachineBasicBlock *, SmallVector<MIRef, 8>> ShapeBBs;
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 8262c8c3a90f2..44394f6deb9a2 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -273,7 +273,7 @@ MemoryEffects llvm::computeFunctionBodyMemoryAccess(Function &F,
/// Deduce readonly/readnone/writeonly attributes for the SCC.
template <typename AARGetterT>
static void addMemoryAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
MemoryEffects ME = MemoryEffects::none();
MemoryEffects RecursiveArgME = MemoryEffects::none();
for (Function *F : SCCNodes) {
@@ -1002,7 +1002,7 @@ determinePointerAccessAttrs(Argument *A,
/// Deduce returned attributes for the SCC.
static void addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
// Check each function in turn, determining if an argument is always returned.
for (Function *F : SCCNodes) {
// We can infer and propagate function attributes only when we know that the
@@ -1238,7 +1238,7 @@ static bool inferInitializes(Argument &A, Function &F) {
/// Deduce nocapture attributes for the SCC.
static void addArgumentAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed,
+ SmallPtrSet<Function *, 8> &Changed,
bool SkipInitializes) {
ArgumentGraph AG;
@@ -1510,7 +1510,7 @@ static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
/// Deduce noalias attributes for the SCC.
static void addNoAliasAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
// Check each function in turn, determining which functions return noalias
// pointers.
for (Function *F : SCCNodes) {
@@ -1623,7 +1623,7 @@ static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
/// Deduce nonnull attributes for the SCC.
static void addNonNullAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
// Speculative that all functions in the SCC return only nonnull
// pointers. We may refute this as we analyze functions.
bool SCCReturnsNonNull = true;
@@ -1680,7 +1680,7 @@ static void addNonNullAttrs(const SCCNodeSet &SCCNodes,
/// Deduce noundef attributes for the SCC.
static void addNoUndefAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
// Check each function in turn, determining which functions return noundef
// values.
for (Function *F : SCCNodes) {
@@ -1788,13 +1788,13 @@ class AttributeInferer {
InferenceDescriptors.push_back(AttrInference);
}
- void run(const SCCNodeSet &SCCNodes, SmallSet<Function *, 8> &Changed);
+ void run(const SCCNodeSet &SCCNodes, SmallPtrSet<Function *, 8> &Changed);
};
/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
void AttributeInferer::run(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
// Go through all the functions in SCC and check corresponding attribute
// assumptions for each of them. Attributes that are invalid for this SCC
@@ -1969,7 +1969,7 @@ static bool InstrBreaksNoSync(Instruction &I, const SCCNodeSet &SCCNodes) {
///
/// Returns true if any changes to function attributes were made.
static void inferConvergent(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
AttributeInferer AI;
// Request to remove the convergent attribute from all functions in the SCC
@@ -2000,7 +2000,7 @@ static void inferConvergent(const SCCNodeSet &SCCNodes,
///
/// Returns true if any changes to function attributes were made.
static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
AttributeInferer AI;
if (!DisableNoUnwindInference)
@@ -2069,7 +2069,7 @@ static void inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes,
}
static void addNoRecurseAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
// Try and identify functions that do not recurse.
// If the SCC contains multiple nodes we know for sure there is recursion.
@@ -2105,7 +2105,7 @@ static void addNoRecurseAttrs(const SCCNodeSet &SCCNodes,
// Set the noreturn function attribute if possible.
static void addNoReturnAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
for (Function *F : SCCNodes) {
if (!F || !F->hasExactDefinition() || F->hasFnAttribute(Attribute::Naked) ||
F->doesNotReturn())
@@ -2166,7 +2166,7 @@ static bool allPathsGoThroughCold(Function &F) {
// Set the cold function attribute if possible.
static void addColdAttrs(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
for (Function *F : SCCNodes) {
if (!F || !F->hasExactDefinition() || F->hasFnAttribute(Attribute::Naked) ||
F->hasFnAttribute(Attribute::Cold) || F->hasFnAttribute(Attribute::Hot))
@@ -2213,7 +2213,7 @@ static bool functionWillReturn(const Function &F) {
// Set the willreturn function attribute if possible.
static void addWillReturn(const SCCNodeSet &SCCNodes,
- SmallSet<Function *, 8> &Changed) {
+ SmallPtrSet<Function *, 8> &Changed) {
for (Function *F : SCCNodes) {
if (!F || F->willReturn() || !functionWillReturn(*F))
continue;
@@ -2239,7 +2239,7 @@ static SCCNodesResult createSCCNodeSet(ArrayRef<Function *> Functions) {
}
template <typename AARGetterT>
-static SmallSet<Function *, 8>
+static SmallPtrSet<Function *, 8>
deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter,
bool ArgAttrsOnly) {
SCCNodesResult Nodes = createSCCNodeSet(Functions);
@@ -2248,7 +2248,7 @@ deriveAttrsInPostOrder(ArrayRef<Function *> Functions, AARGetterT &&AARGetter,
if (Nodes.SCCNodes.empty())
return {};
- SmallSet<Function *, 8> Changed;
+ SmallPtrSet<Function *, 8> Changed;
if (ArgAttrsOnly) {
// ArgAttrsOnly means to only infer attributes that may aid optimizations
// on the *current* function. "initializes" attribute is to aid
diff --git a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
index 938aab5879044..ac59ae182896b 100644
--- a/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp
@@ -447,7 +447,7 @@ struct MainSwitch {
/// Also, collect select instructions to unfold.
bool isCandidate(const SwitchInst *SI) {
std::deque<std::pair<Value *, BasicBlock *>> Q;
- SmallSet<Value *, 16> SeenValues;
+ SmallPtrSet<Value *, 16> SeenValues;
SelectInsts.clear();
Value *SICond = SI->getCondition();
@@ -511,7 +511,7 @@ struct MainSwitch {
void addToQueue(Value *Val, BasicBlock *BB,
std::deque<std::pair<Value *, BasicBlock *>> &Q,
- SmallSet<Value *, 16> &SeenValues) {
+ SmallPtrSet<Value *, 16> &SeenValues) {
if (SeenValues.insert(Val).second)
Q.push_back({Val, BB});
}
@@ -713,7 +713,7 @@ struct AllSwitchPaths {
// Some blocks have multiple edges to the same successor, and this set
// is used to prevent a duplicate path from being generated
- SmallSet<BasicBlock *, 4> Successors;
+ SmallPtrSet<BasicBlock *, 4> Successors;
for (BasicBlock *Succ : successors(BB)) {
if (!Successors.insert(Succ).second)
continue;
@@ -762,7 +762,7 @@ struct AllSwitchPaths {
SmallVector<PHINode *, 8> Stack;
Stack.push_back(FirstDef);
- SmallSet<Value *, 16> SeenValues;
+ SmallPtrSet<Value *, 16> SeenValues;
while (!Stack.empty()) {
PHINode *CurPhi = Stack.pop_back_val();
@@ -955,7 +955,7 @@ struct TransformDFA {
DuplicateBlockMap DuplicateMap;
DefMap NewDefs;
- SmallSet<BasicBlock *, 16> BlocksToClean;
+ SmallPtrSet<BasicBlock *, 16> BlocksToClean;
BlocksToClean.insert_range(successors(SwitchBlock));
for (ThreadingPath &TPath : SwitchPaths->getThreadingPaths()) {
@@ -984,7 +984,7 @@ struct TransformDFA {
/// the predecessors, and phis in the successor blocks.
void createExitPath(DefMap &NewDefs, ThreadingPath &Path,
DuplicateBlockMap &DuplicateMap,
- SmallSet<BasicBlock *, 16> &BlocksToClean,
+ SmallPtrSet<BasicBlock *, 16> &BlocksToClean,
DomTreeUpdater *DTU) {
APInt NextState = Path.getExitValue();
const BasicBlock *Determinator = Path.getDeterminatorBB();
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 7704e49c499da..4baa3b3eb8242 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -978,7 +978,7 @@ static bool IsValueFullyAvailableInBlock(
unsigned NumNewNewSpeculativelyAvailableBBs = 0;
#ifndef NDEBUG
- SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
+ SmallPtrSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
SmallVector<BasicBlock *, 32> AvailableBBs;
#endif
@@ -1222,7 +1222,7 @@ static bool liesBetween(const Instruction *From, Instruction *Between,
const Instruction *To, const DominatorTree *DT) {
if (From->getParent() == Between->getParent())
return DT->dominates(From, Between);
- SmallSet<BasicBlock *, 1> Exclusion;
+ SmallPtrSet<BasicBlock *, 1> Exclusion;
Exclusion.insert(Between->getParent());
return !isPotentiallyReachable(From, To, &Exclusion, DT);
}
diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
index 3ba5b79293bcd..d99f1eb9c93cd 100644
--- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
@@ -642,9 +642,9 @@ Value *GuardWideningImpl::freezeAndPush(Value *Orig,
return FI;
}
- SmallSet<Value *, 16> Visited;
+ SmallPtrSet<Value *, 16> Visited;
SmallVector<Value *, 16> Worklist;
- SmallSet<Instruction *, 16> DropPoisonFlags;
+ SmallPtrSet<Instruction *, 16> DropPoisonFlags;
SmallVector<Value *, 16> NeedFreeze;
DenseMap<Value *, FreezeInst *> CacheOfFreezes;
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 334c911191cb8..6720cb1ef8998 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1613,7 +1613,7 @@ bool IndVarSimplify::optimizeLoopExits(Loop *L, SCEVExpander &Rewriter) {
if (CurrMaxExit == MaxBECount)
SkipLastIter = true;
};
- SmallSet<const SCEV *, 8> DominatingExactExitCounts;
+ SmallPtrSet<const SCEV *, 8> DominatingExactExitCounts;
for (BasicBlock *ExitingBB : ExitingBlocks) {
const SCEV *ExactExitCount = SE->getExitCount(L, ExitingBB);
const SCEV *MaxExitCount = SE->getExitCount(
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index c68149b780807..5795c761b3bee 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -1209,7 +1209,7 @@ class LowerMatrixIntrinsics {
//
// For verification, we keep track of where we changed uses to poison in
// PoisonedInsts and then check that we in fact remove them.
- SmallSet<Instruction *, 16> PoisonedInsts;
+ SmallPtrSet<Instruction *, 16> PoisonedInsts;
for (auto *Inst : reverse(ToRemove)) {
for (Use &U : llvm::make_early_inc_range(Inst->uses())) {
if (auto *Poisoned = dyn_cast<Instruction>(U.getUser()))
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index f237322f90455..e043d072a7638 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1530,7 +1530,7 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store,
// to remove them.
SmallVector<Instruction *, 4> LifetimeMarkers;
- SmallSet<Instruction *, 4> AAMetadataInstrs;
+ SmallPtrSet<Instruction *, 4> AAMetadataInstrs;
bool SrcNotDom = false;
auto CaptureTrackingWithModRef =
@@ -1540,7 +1540,7 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store,
Worklist.push_back(AI);
unsigned MaxUsesToExplore = getDefaultMaxUsesToExploreForCaptureTracking();
Worklist.reserve(MaxUsesToExplore);
- SmallSet<const Use *, 20> Visited;
+ SmallPtrSet<const Use *, 20> Visited;
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
for (const Use &U : I->uses()) {
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 343da5b2e4704..ba58b8e4eda5e 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -878,7 +878,7 @@ static Value *NegateValue(Value *V, Instruction *BI,
// only that it mostly looks like one.
static bool isLoadCombineCandidate(Instruction *Or) {
SmallVector<Instruction *, 8> Worklist;
- SmallSet<Instruction *, 8> Visited;
+ SmallPtrSet<Instruction *, 8> Visited;
auto Enqueue = [&](Value *V) {
auto *I = dyn_cast<Instruction>(V);
diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
index 44e63a0583d1a..b17dcb7869420 100644
--- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -328,7 +328,7 @@ class StructurizeCFG {
void addPhiValues(BasicBlock *From, BasicBlock *To);
void findUndefBlocks(BasicBlock *PHIBlock,
- const SmallSet<BasicBlock *, 8> &Incomings,
+ const SmallPtrSet<BasicBlock *, 8> &Incomings,
SmallVector<BasicBlock *> &UndefBlks) const;
void mergeIfCompatible(EquivalenceClasses<PHINode *> &PhiClasses, PHINode *A,
@@ -762,7 +762,7 @@ void StructurizeCFG::addPhiValues(BasicBlock *From, BasicBlock *To) {
/// from some blocks as undefined. The function will find out all such blocks
/// and return in \p UndefBlks.
void StructurizeCFG::findUndefBlocks(
- BasicBlock *PHIBlock, const SmallSet<BasicBlock *, 8> &Incomings,
+ BasicBlock *PHIBlock, const SmallPtrSet<BasicBlock *, 8> &Incomings,
SmallVector<BasicBlock *> &UndefBlks) const {
// We may get a post-structured CFG like below:
//
@@ -788,7 +788,7 @@ void StructurizeCFG::findUndefBlocks(
// path N->F2->F3->B. For example, the threads take the branch F1->N may
// always take the branch F2->P2. So, when we are reconstructing a PHI
// originally in B, we can safely say the incoming value from N is undefined.
- SmallSet<BasicBlock *, 8> VisitedBlock;
+ SmallPtrSet<BasicBlock *, 8> VisitedBlock;
SmallVector<BasicBlock *, 8> Stack;
if (PHIBlock == ParentRegion->getExit()) {
for (auto P : predecessors(PHIBlock)) {
@@ -884,7 +884,7 @@ void StructurizeCFG::setPhiValues() {
PhiMap &BlkPhis = OldPhiIt->second;
SmallVector<BasicBlock *> &UndefBlks = UndefBlksMap[To];
- SmallSet<BasicBlock *, 8> Incomings;
+ SmallPtrSet<BasicBlock *, 8> Incomings;
// Get the undefined blocks shared by all the phi nodes.
if (!BlkPhis.empty()) {
diff --git a/llvm/lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp b/llvm/lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp
index 40010aee9c111..8044f611e89f0 100644
--- a/llvm/lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp
+++ b/llvm/lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp
@@ -193,7 +193,7 @@ bool CanonicalizeFreezeInLoopsImpl::run() {
if (Candidates.empty())
return false;
- SmallSet<PHINode *, 8> ProcessedPHIs;
+ SmallPtrSet<PHINode *, 8> ProcessedPHIs;
for (const auto &Info : Candidates) {
PHINode *PHI = Info.PHI;
if (!ProcessedPHIs.insert(Info.PHI).second)
diff --git a/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp b/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp
index 4b0065d0030cd..8954de618bc2d 100644
--- a/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp
+++ b/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp
@@ -276,7 +276,7 @@ std::pair<BasicBlock *, bool> ControlFlowHub::finalize(
DomTreeUpdater *DTU, SmallVectorImpl<BasicBlock *> &GuardBlocks,
const StringRef Prefix, std::optional<unsigned> MaxControlFlowBooleans) {
#ifndef NDEBUG
- SmallSet<BasicBlock *, 8> Incoming;
+ SmallPtrSet<BasicBlock *, 8> Incoming;
#endif
SetVector<BasicBlock *> Outgoing;
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index b559212de71d7..ac344904f90f0 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -275,7 +275,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
Builder.CreateBr(TheOnlyDest);
BasicBlock *BB = SI->getParent();
- SmallSet<BasicBlock *, 8> RemovedSuccessors;
+ SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
// Remove entries from PHI nodes which we no longer branch to...
BasicBlock *SuccToKeep = TheOnlyDest;
@@ -343,7 +343,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
if (auto *BA =
dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
BasicBlock *TheOnlyDest = BA->getBasicBlock();
- SmallSet<BasicBlock *, 8> RemovedSuccessors;
+ SmallPtrSet<BasicBlock *, 8> RemovedSuccessors;
// Insert the new branch.
Builder.CreateBr(TheOnlyDest);
@@ -2518,7 +2518,7 @@ unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
if (MSSAU)
MSSAU->changeToUnreachable(I);
- SmallSet<BasicBlock *, 8> UniqueSuccessors;
+ SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;
// Loop over all of the successors, removing BB's entry from any PHI
// nodes.
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index d96f1d6c23d47..10c162bc6463a 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -136,7 +136,7 @@ class AssignmentTrackingInfo {
/// \p ToDelete that stores to this alloca.
void updateForDeletedStore(
StoreInst *ToDelete, DIBuilder &DIB,
- SmallSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) const {
+ SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) const {
// There's nothing to do if the alloca doesn't have any variables using
// assignment tracking.
if (DVRAssigns.empty())
@@ -382,7 +382,7 @@ struct PromoteMem2Reg {
SmallVector<AssignmentTrackingInfo, 8> AllocaATInfo;
/// A set of dbg.assigns to delete because they've been demoted to
/// dbg.values. Call cleanUpDbgAssigns to delete them.
- SmallSet<DbgVariableRecord *, 8> DVRAssignsToDelete;
+ SmallPtrSet<DbgVariableRecord *, 8> DVRAssignsToDelete;
/// The set of basic blocks the renamer has already visited.
BitVector Visited;
@@ -533,11 +533,10 @@ static void removeIntrinsicUsers(AllocaInst *AI) {
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
-static bool
-rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI,
- const DataLayout &DL, DominatorTree &DT,
- AssumptionCache *AC,
- SmallSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
+static bool rewriteSingleStoreAlloca(
+ AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI, const DataLayout &DL,
+ DominatorTree &DT, AssumptionCache *AC,
+ SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
StoreInst *OnlyStore = Info.OnlyStore;
Value *ReplVal = OnlyStore->getOperand(0);
// Loads may either load the stored value or uninitialized memory (undef).
@@ -647,11 +646,10 @@ rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info, LargeBlockInfo &LBI,
/// use(t);
/// *A = 42;
/// }
-static bool
-promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
- LargeBlockInfo &LBI, const DataLayout &DL,
- DominatorTree &DT, AssumptionCache *AC,
- SmallSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
+static bool promoteSingleBlockAlloca(
+ AllocaInst *AI, const AllocaInfo &Info, LargeBlockInfo &LBI,
+ const DataLayout &DL, DominatorTree &DT, AssumptionCache *AC,
+ SmallPtrSet<DbgVariableRecord *, 8> *DVRAssignsToDelete) {
// The trickiest case to handle is when we have large blocks. Because of this,
// this code is optimized assuming that large blocks happen. This does not
// significantly pessimize the small block case. This uses LargeBlockInfo to
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 1eb8996fca031..e218db30d92b4 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1346,7 +1346,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
CanonicalIV->insertBefore(Header->begin());
rememberInstruction(CanonicalIV);
- SmallSet<BasicBlock *, 4> PredSeen;
+ SmallPtrSet<BasicBlock *, 4> PredSeen;
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
BasicBlock *HP = *HPI;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 675a230bd2c94..e009b81afd0ed 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8111,7 +8111,7 @@ void VPRecipeBuilder::collectScaledReductions(VFRange &Range) {
// extends are intended to be lowered along with the reduction itself.
// Build up a set of partial reduction ops for efficient use checking.
- SmallSet<User *, 4> PartialReductionOps;
+ SmallPtrSet<User *, 4> PartialReductionOps;
for (const auto &[PartialRdx, _] : PartialReductionChains)
PartialReductionOps.insert(PartialRdx.ExtendUser);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index b88de09a3e447..37dc41413966d 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -24477,7 +24477,7 @@ class HorizontalReduction {
// correct, replace internal uses with undef, and mark for eventual
// deletion.
#ifndef NDEBUG
- SmallSet<Value *, 4> IgnoreSet;
+ SmallPtrSet<Value *, 4> IgnoreSet;
for (ArrayRef<Value *> RdxOps : ReductionOps)
IgnoreSet.insert_range(RdxOps);
#endif
More information about the llvm-commits
mailing list