[llvm] [llvm] Replace SmallSet with SmallPtrSet (NFC) (PR #154068)

via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 17 23:44:59 PDT 2025


llvmbot wrote:


@llvm/pr-subscribers-backend-mips

@llvm/pr-subscribers-backend-hexagon

Author: Kazu Hirata (kazutakahirata)

<details>
<summary>Changes</summary>

This patch replaces SmallSet<T *, N> with SmallPtrSet<T *, N>.  Note
that SmallSet.h "redirects" SmallSet to SmallPtrSet for pointer
element types:

  template <typename PointeeType, unsigned N>
  class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};

Only 140 instances rely on this "redirection", with the vast majority of
them under llvm/. Since relying on the redirection does not improve
readability, this patch replaces SmallSet with SmallPtrSet for those
pointer element types.
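
For context, a minimal standalone sketch (not part of the patch; the function names are made up for illustration) of what the mechanical change looks like at a call site. Because the SmallSet partial specialization above simply inherits from SmallPtrSet, both spellings produce the same container, which is why the change is NFC:

```cpp
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/BasicBlock.h"

using namespace llvm;

// Before the patch: SmallSet<BasicBlock *, 32> picks up the pointer
// partial specialization, which derives from SmallPtrSet, so every call
// below dispatches to SmallPtrSet anyway.
static unsigned countUniqueBefore(ArrayRef<BasicBlock *> BBs) {
  SmallSet<BasicBlock *, 32> Seen;
  for (BasicBlock *BB : BBs)
    Seen.insert(BB); // forwards to SmallPtrSet::insert
  return Seen.size();
}

// After the patch: the underlying container is named directly; behavior
// and performance are identical.
static unsigned countUniqueAfter(ArrayRef<BasicBlock *> BBs) {
  SmallPtrSet<BasicBlock *, 32> Seen;
  for (BasicBlock *BB : BBs)
    Seen.insert(BB);
  return Seen.size();
}
```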


---

Patch is 56.15 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/154068.diff


55 Files Affected:

- (modified) llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h (+1-1) 
- (modified) llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h (+1-1) 
- (modified) llvm/include/llvm/CodeGen/MachinePipeliner.h (+1-1) 
- (modified) llvm/include/llvm/CodeGen/ScheduleDAG.h (+1-1) 
- (modified) llvm/lib/Analysis/CallPrinter.cpp (+2-2) 
- (modified) llvm/lib/Analysis/CaptureTracking.cpp (+1-1) 
- (modified) llvm/lib/Analysis/ScalarEvolution.cpp (+1-1) 
- (modified) llvm/lib/Analysis/ValueTracking.cpp (+3-3) 
- (modified) llvm/lib/CodeGen/CodeGenPrepare.cpp (+7-8) 
- (modified) llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp (+1-1) 
- (modified) llvm/lib/CodeGen/MachineCopyPropagation.cpp (+1-1) 
- (modified) llvm/lib/CodeGen/MachineDebugify.cpp (+1-1) 
- (modified) llvm/lib/CodeGen/MachinePipeliner.cpp (+3-3) 
- (modified) llvm/lib/CodeGen/MacroFusion.cpp (+1-1) 
- (modified) llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (+1-1) 
- (modified) llvm/lib/CodeGen/SwiftErrorValueTracking.cpp (+1-1) 
- (modified) llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp (+1-1) 
- (modified) llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp (+1-1) 
- (modified) llvm/lib/IR/AutoUpgrade.cpp (+1-1) 
- (modified) llvm/lib/IR/Verifier.cpp (+2-2) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp (+1-1) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp (+2-2) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp (+1-1) 
- (modified) llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp (+2-2) 
- (modified) llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp (+2-2) 
- (modified) llvm/lib/Target/ARM/ARMConstantIslandPass.cpp (+1-1) 
- (modified) llvm/lib/Target/ARM/MVETPAndVPTOptimisationsPass.cpp (+1-1) 
- (modified) llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp (+1-1) 
- (modified) llvm/lib/Target/Hexagon/HexagonGenInsert.cpp (+1-1) 
- (modified) llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp (+1-1) 
- (modified) llvm/lib/Target/Hexagon/HexagonSubtarget.cpp (+5-5) 
- (modified) llvm/lib/Target/Hexagon/HexagonSubtarget.h (+2-1) 
- (modified) llvm/lib/Target/Mips/MipsConstantIslandPass.cpp (+1-1) 
- (modified) llvm/lib/Target/PowerPC/PPCCTRLoopsVerify.cpp (+1-1) 
- (modified) llvm/lib/Target/PowerPC/PPCISelLowering.cpp (+2-2) 
- (modified) llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp (+8-9) 
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+1-1) 
- (modified) llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp (+1-1) 
- (modified) llvm/lib/Target/X86/X86PreTileConfig.cpp (+1-1) 
- (modified) llvm/lib/Transforms/IPO/FunctionAttrs.cpp (+16-16) 
- (modified) llvm/lib/Transforms/Scalar/DFAJumpThreading.cpp (+6-6) 
- (modified) llvm/lib/Transforms/Scalar/GVN.cpp (+2-2) 
- (modified) llvm/lib/Transforms/Scalar/GuardWidening.cpp (+2-2) 
- (modified) llvm/lib/Transforms/Scalar/IndVarSimplify.cpp (+1-1) 
- (modified) llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp (+1-1) 
- (modified) llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp (+2-2) 
- (modified) llvm/lib/Transforms/Scalar/Reassociate.cpp (+1-1) 
- (modified) llvm/lib/Transforms/Scalar/StructurizeCFG.cpp (+4-4) 
- (modified) llvm/lib/Transforms/Utils/CanonicalizeFreezeInLoops.cpp (+1-1) 
- (modified) llvm/lib/Transforms/Utils/ControlFlowUtils.cpp (+1-1) 
- (modified) llvm/lib/Transforms/Utils/Local.cpp (+3-3) 
- (modified) llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp (+10-12) 
- (modified) llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp (+1-1) 
- (modified) llvm/lib/Transforms/Vectorize/LoopVectorize.cpp (+1-1) 
- (modified) llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp (+1-1) 


``````````diff
diff --git a/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h b/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h
index 896b68c5021b3..6bfad783b529b 100644
--- a/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h
+++ b/llvm/include/llvm/Analysis/GenericDomTreeUpdaterImpl.h
@@ -383,7 +383,7 @@ void GenericDomTreeUpdater<DerivedT, DomTreeT, PostDomTreeT>::
   // field of all the elements of Edges.
   // I.e., forall elt in Edges, it exists BB in NewBBs
   // such as BB == elt.NewBB.
-  SmallSet<BasicBlockT *, 32> NewBBs;
+  SmallPtrSet<BasicBlockT *, 32> NewBBs;
   for (auto &Edge : Edges)
     NewBBs.insert(Edge.NewBB);
   // For each element in Edges, remember whether or not element
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h b/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
index cee779a5fd5d1..4b7506e013762 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
@@ -162,7 +162,7 @@ class LLVM_ABI LoadStoreOpt : public MachineFunctionPass {
   DenseMap<unsigned, BitVector> LegalStoreSizes;
   bool IsPreLegalizer = false;
   /// Contains instructions to be erased at the end of a block scan.
-  SmallSet<MachineInstr *, 16> InstsToErase;
+  SmallPtrSet<MachineInstr *, 16> InstsToErase;
 
 public:
   LoadStoreOpt();
diff --git a/llvm/include/llvm/CodeGen/MachinePipeliner.h b/llvm/include/llvm/CodeGen/MachinePipeliner.h
index e50443d25cc60..c90ff4f3daa47 100644
--- a/llvm/include/llvm/CodeGen/MachinePipeliner.h
+++ b/llvm/include/llvm/CodeGen/MachinePipeliner.h
@@ -830,7 +830,7 @@ class SMSchedule {
     return ScheduledInstrs[cycle];
   }
 
-  SmallSet<SUnit *, 8>
+  SmallPtrSet<SUnit *, 8>
   computeUnpipelineableNodes(SwingSchedulerDAG *SSD,
                              TargetInstrInfo::PipelinerLoopInfo *PLI);
 
diff --git a/llvm/include/llvm/CodeGen/ScheduleDAG.h b/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 122b7be96b46a..aee1514581485 100644
--- a/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -237,7 +237,7 @@ class TargetRegisterInfo;
   };
 
   /// Keep record of which SUnit are in the same cluster group.
-  typedef SmallSet<SUnit *, 8> ClusterInfo;
+  typedef SmallPtrSet<SUnit *, 8> ClusterInfo;
   constexpr unsigned InvalidClusterId = ~0u;
 
   /// Return whether the input cluster ID's are the same and valid.
diff --git a/llvm/lib/Analysis/CallPrinter.cpp b/llvm/lib/Analysis/CallPrinter.cpp
index 672dae1642cb3..99d8b11f0c4ba 100644
--- a/llvm/lib/Analysis/CallPrinter.cpp
+++ b/llvm/lib/Analysis/CallPrinter.cpp
@@ -70,7 +70,7 @@ class CallGraphDOTInfo {
 
     for (Function &F : M->getFunctionList()) {
       uint64_t localSumFreq = 0;
-      SmallSet<Function *, 16> Callers;
+      SmallPtrSet<Function *, 16> Callers;
       for (User *U : F.users())
         if (isa<CallInst>(U))
           Callers.insert(cast<Instruction>(U)->getFunction());
@@ -99,7 +99,7 @@ class CallGraphDOTInfo {
 
       bool FoundParallelEdge = true;
       while (FoundParallelEdge) {
-        SmallSet<Function *, 16> Visited;
+        SmallPtrSet<Function *, 16> Visited;
         FoundParallelEdge = false;
         for (auto CI = Node->begin(), CE = Node->end(); CI != CE; CI++) {
           if (!(Visited.insert(CI->second->getFunction())).second) {
diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp
index bd0d417b1ed33..b6acda3a9f259 100644
--- a/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/llvm/lib/Analysis/CaptureTracking.cpp
@@ -405,7 +405,7 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
 
   SmallVector<const Use *, 20> Worklist;
   Worklist.reserve(getDefaultMaxUsesToExploreForCaptureTracking());
-  SmallSet<const Use *, 20> Visited;
+  SmallPtrSet<const Use *, 20> Visited;
 
   auto AddUses = [&](const Value *V) {
     for (const Use &U : V->uses()) {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index ce4d4ad7a0ab0..d2c445f1ffaa0 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -7284,7 +7284,7 @@ ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
                                        bool &Precise) {
   Precise = true;
   // Do a bounded search of the def relation of the requested SCEVs.
-  SmallSet<const SCEV *, 16> Visited;
+  SmallPtrSet<const SCEV *, 16> Visited;
   SmallVector<const SCEV *> Worklist;
   auto pushOp = [&](const SCEV *S) {
     if (!Visited.insert(S).second)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index b0e4b009f3501..50e43a53def6c 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -7785,7 +7785,7 @@ bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
 
   // The set of all recursive users we've visited (which are assumed to all be
   // poison because of said visit)
-  SmallSet<const Value *, 16> KnownPoison;
+  SmallPtrSet<const Value *, 16> KnownPoison;
   SmallVector<const Instruction*, 16> Worklist;
   Worklist.push_back(Root);
   while (!Worklist.empty()) {
@@ -8140,8 +8140,8 @@ static bool programUndefinedIfUndefOrPoison(const Value *V,
 
   // Set of instructions that we have proved will yield poison if Inst
   // does.
-  SmallSet<const Value *, 16> YieldsPoison;
-  SmallSet<const BasicBlock *, 4> Visited;
+  SmallPtrSet<const Value *, 16> YieldsPoison;
+  SmallPtrSet<const BasicBlock *, 4> Visited;
 
   YieldsPoison.insert(V);
   Visited.insert(BB);
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 9223739fc0098..0e40a92fd8d64 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -377,7 +377,7 @@ class CodeGenPrepare {
   /// to be optimized again.
   /// Note: Consider building time in this pass, when a BB updated, we need
   /// to insert such BB into FreshBBs for huge function.
-  SmallSet<BasicBlock *, 32> FreshBBs;
+  SmallPtrSet<BasicBlock *, 32> FreshBBs;
 
   void releaseMemory() {
     // Clear per function information.
@@ -1105,7 +1105,7 @@ bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
 
 /// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
 static void replaceAllUsesWith(Value *Old, Value *New,
-                               SmallSet<BasicBlock *, 32> &FreshBBs,
+                               SmallPtrSet<BasicBlock *, 32> &FreshBBs,
                                bool IsHuge) {
   auto *OldI = dyn_cast<Instruction>(Old);
   if (OldI) {
@@ -2135,7 +2135,7 @@ static bool isRemOfLoopIncrementWithLoopInvariant(
 //    Rem = rem == RemAmtLoopInvariant ? 0 : Rem;
 static bool foldURemOfLoopIncrement(Instruction *Rem, const DataLayout *DL,
                                     const LoopInfo *LI,
-                                    SmallSet<BasicBlock *, 32> &FreshBBs,
+                                    SmallPtrSet<BasicBlock *, 32> &FreshBBs,
                                     bool IsHuge) {
   Value *AddOffset, *RemAmt, *AddInst;
   PHINode *LoopIncrPN;
@@ -2534,11 +2534,10 @@ static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
 ///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
 ///
 /// If the transform is performed, return true and set ModifiedDT to true.
-static bool despeculateCountZeros(IntrinsicInst *CountZeros,
-                                  LoopInfo &LI,
+static bool despeculateCountZeros(IntrinsicInst *CountZeros, LoopInfo &LI,
                                   const TargetLowering *TLI,
                                   const DataLayout *DL, ModifyDT &ModifiedDT,
-                                  SmallSet<BasicBlock *, 32> &FreshBBs,
+                                  SmallPtrSet<BasicBlock *, 32> &FreshBBs,
                                   bool IsHugeFunc) {
   // If a zero input is undefined, it doesn't make sense to despeculate that.
   if (match(CountZeros->getOperand(1), m_One()))
@@ -4351,7 +4350,7 @@ class AddressingModeCombiner {
                     PhiNodeSet &PhiNodesToMatch) {
     SmallVector<PHIPair, 8> WorkList;
     Matcher.insert({PHI, Candidate});
-    SmallSet<PHINode *, 8> MatchedPHIs;
+    SmallPtrSet<PHINode *, 8> MatchedPHIs;
     MatchedPHIs.insert(PHI);
     WorkList.push_back({PHI, Candidate});
     SmallSet<PHIPair, 8> Visited;
@@ -8635,7 +8634,7 @@ static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
 }
 
 static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
-                           SmallSet<BasicBlock *, 32> &FreshBBs,
+                           SmallPtrSet<BasicBlock *, 32> &FreshBBs,
                            bool IsHugeFunc) {
   // Try and convert
   //  %c = icmp ult %x, 8
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 64c19fab1a023..7ca02ad756f51 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3517,7 +3517,7 @@ void IRTranslator::finishPendingPhis() {
     Verifier.setCurrentInst(PI);
 #endif // ifndef NDEBUG
 
-    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
+    SmallPtrSet<const MachineBasicBlock *, 16> SeenPreds;
     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
       auto IRPred = PI->getIncomingBlock(i);
       ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
index 742de1101faa2..e35983138550f 100644
--- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -490,7 +490,7 @@ class MachineCopyPropagation {
   SmallSetVector<MachineInstr *, 8> MaybeDeadCopies;
 
   /// Multimap tracking debug users in current BB
-  DenseMap<MachineInstr *, SmallSet<MachineInstr *, 2>> CopyDbgUsers;
+  DenseMap<MachineInstr *, SmallPtrSet<MachineInstr *, 2>> CopyDbgUsers;
 
   CopyTracker Tracker;
 
diff --git a/llvm/lib/CodeGen/MachineDebugify.cpp b/llvm/lib/CodeGen/MachineDebugify.cpp
index 1a20fe586e951..307f49468eb39 100644
--- a/llvm/lib/CodeGen/MachineDebugify.cpp
+++ b/llvm/lib/CodeGen/MachineDebugify.cpp
@@ -87,7 +87,7 @@ bool applyDebugifyMetadataToMachineFunction(MachineModuleInfo &MMI,
   // Do this by introducing debug uses of each register definition. If that is
   // not possible (e.g. we have a phi or a meta instruction), emit a constant.
   uint64_t NextImm = 0;
-  SmallSet<DILocalVariable *, 16> VarSet;
+  SmallPtrSet<DILocalVariable *, 16> VarSet;
   const MCInstrDesc &DbgValDesc = TII.get(TargetOpcode::DBG_VALUE);
   for (MachineBasicBlock &MBB : MF) {
     MachineBasicBlock::iterator FirstNonPHIIt = MBB.getFirstNonPHI();
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 90005bd181f3a..3a9651c5cee04 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -3466,9 +3466,9 @@ bool SMSchedule::onlyHasLoopCarriedOutputOrOrderPreds(
 }
 
 /// Determine transitive dependences of unpipelineable instructions
-SmallSet<SUnit *, 8> SMSchedule::computeUnpipelineableNodes(
+SmallPtrSet<SUnit *, 8> SMSchedule::computeUnpipelineableNodes(
     SwingSchedulerDAG *SSD, TargetInstrInfo::PipelinerLoopInfo *PLI) {
-  SmallSet<SUnit *, 8> DoNotPipeline;
+  SmallPtrSet<SUnit *, 8> DoNotPipeline;
   SmallVector<SUnit *, 8> Worklist;
 
   for (auto &SU : SSD->SUnits)
@@ -3498,7 +3498,7 @@ SmallSet<SUnit *, 8> SMSchedule::computeUnpipelineableNodes(
 // and ensure that they are in stage 0.  If unable to do so, return false.
 bool SMSchedule::normalizeNonPipelinedInstructions(
     SwingSchedulerDAG *SSD, TargetInstrInfo::PipelinerLoopInfo *PLI) {
-  SmallSet<SUnit *, 8> DNP = computeUnpipelineableNodes(SSD, PLI);
+  SmallPtrSet<SUnit *, 8> DNP = computeUnpipelineableNodes(SSD, PLI);
 
   int NewLastCycle = INT_MIN;
   for (SUnit &SU : SSD->SUnits) {
diff --git a/llvm/lib/CodeGen/MacroFusion.cpp b/llvm/lib/CodeGen/MacroFusion.cpp
index 975a3fe71abad..1db53017e6cef 100644
--- a/llvm/lib/CodeGen/MacroFusion.cpp
+++ b/llvm/lib/CodeGen/MacroFusion.cpp
@@ -79,7 +79,7 @@ bool llvm::fuseInstructionPair(ScheduleDAGInstrs &DAG, SUnit &FirstSU,
   FirstSU.ParentClusterIdx = Clusters.size();
   SecondSU.ParentClusterIdx = Clusters.size();
 
-  SmallSet<SUnit *, 8> Cluster{{&FirstSU, &SecondSU}};
+  SmallPtrSet<SUnit *, 8> Cluster{{&FirstSU, &SecondSU}};
   Clusters.push_back(Cluster);
 
   // TODO - If we want to chain more than two instructions, we need to create
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2a1ef2b980ac4..2565339c5de16 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3576,7 +3576,7 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
 
   // Update machine-CFG edges with unique successors.
-  SmallSet<BasicBlock*, 32> Done;
+  SmallPtrSet<BasicBlock *, 32> Done;
   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
     BasicBlock *BB = I.getSuccessor(i);
     bool Inserted = Done.insert(BB).second;
diff --git a/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp b/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
index decffdc7dfe45..ff4b568b5ee20 100644
--- a/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
+++ b/llvm/lib/CodeGen/SwiftErrorValueTracking.cpp
@@ -179,7 +179,7 @@ void SwiftErrorValueTracking::propagateVRegs() {
       // Check whether we have a single vreg def from all predecessors.
       // Otherwise we need a phi.
       SmallVector<std::pair<MachineBasicBlock *, Register>, 4> VRegs;
-      SmallSet<const MachineBasicBlock *, 8> Visited;
+      SmallPtrSet<const MachineBasicBlock *, 8> Visited;
       for (auto *Pred : MBB->predecessors()) {
         if (!Visited.insert(Pred).second)
           continue;
diff --git a/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp b/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
index 1bafed79d6968..ba27aa87b7c7a 100644
--- a/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
@@ -64,7 +64,7 @@ class MachODebugObjectSynthesizerBase
       LLVM_DEBUG({
         dbgs() << "  Preserving debug section " << Sec.getName() << "\n";
       });
-      SmallSet<Block *, 8> PreservedBlocks;
+      SmallPtrSet<Block *, 8> PreservedBlocks;
       for (auto *Sym : Sec.symbols()) {
         bool NewPreservedBlock =
             PreservedBlocks.insert(&Sym->getBlock()).second;
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index c16b0dde1a3da..e9147a42452d0 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -5930,7 +5930,7 @@ void OpenMPIRBuilder::applySimd(CanonicalLoopInfo *CanonicalLoop,
     createIfVersion(CanonicalLoop, IfCond, VMap, LIA, LI, L, "simd");
   }
 
-  SmallSet<BasicBlock *, 8> Reachable;
+  SmallPtrSet<BasicBlock *, 8> Reachable;
 
   // Get the basic blocks from the loop in which memref instructions
   // can be found.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index b91fd70bd9467..e200f3626e69d 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -5391,7 +5391,7 @@ void llvm::UpgradeNVVMAnnotations(Module &M) {
     return;
 
   SmallVector<MDNode *, 8> NewNodes;
-  SmallSet<const MDNode *, 8> SeenNodes;
+  SmallPtrSet<const MDNode *, 8> SeenNodes;
   for (MDNode *MD : NamedMD->operands()) {
     if (!SeenNodes.insert(MD).second)
       continue;
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 5a93228faa3ac..9d9b51db98702 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4636,7 +4636,7 @@ void Verifier::visitEHPadPredecessors(Instruction &I) {
     }
 
     // The edge may exit from zero or more nested pads.
-    SmallSet<Value *, 8> Seen;
+    SmallPtrSet<Value *, 8> Seen;
     for (;; FromPad = getParentPad(FromPad)) {
       Check(FromPad != ToPad,
             "EH pad cannot handle exceptions raised within it", FromPad, TI);
@@ -4764,7 +4764,7 @@ void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
   User *FirstUser = nullptr;
   Value *FirstUnwindPad = nullptr;
   SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
-  SmallSet<FuncletPadInst *, 8> Seen;
+  SmallPtrSet<FuncletPadInst *, 8> Seen;
 
   while (!Worklist.empty()) {
     FuncletPadInst *CurrentPad = Worklist.pop_back_val();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
index e65dd1b04cc48..dfe7c53aaca06 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
@@ -384,7 +384,7 @@ bool isClobberedInFunction(const LoadInst *Load, MemorySSA *MSSA,
                            AAResults *AA) {
   MemorySSAWalker *Walker = MSSA->getWalker();
   SmallVector<MemoryAccess *> WorkList{Walker->getClobberingMemoryAccess(Load)};
-  SmallSet<MemoryAccess *, 8> Visited;
+  SmallPtrSet<MemoryAccess *, 8> Visited;
   MemoryLocation Loc(MemoryLocation::get(Load));
 
   LLVM_DEBUG(dbgs() << "Checking clobbering of: " << *Load << '\n');
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
index 3a3751892c8b6..28d5400fd1807 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
@@ -134,8 +134,8 @@ static std::pair<const Value *, const Type *> getMemoryInstrPtrAndType(
 
 bool AMDGPUPerfHint::isIndirectAccess(const Instruction *Inst) const {
   LLVM_DEBUG(dbgs() << "[isIndirectAccess] " << *Inst << '\n');
-  SmallSet<const Value *, 32> WorkSet;
-  SmallSet<const Value *, 32> Visited;
+  SmallPtrSet<const Value *, 32> WorkSet;
+  SmallPtrSet<const Value *, 32> Visited;
   if (const Value *MO = getMemoryInstrPtrAndType(Inst).first) {
     if (isGlobalAddr(MO))
       WorkSet.insert(MO);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp
index b60ded33a4ac3..56aa3f6db83ad 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSetWavePriority.cpp
@@ -195,7 +195,7 @@ bool AMDGPUSetWavePriority::run(MachineFunction &MF) {
 
   // Lower the priority on edges where control leaves blocks from which
   // the VMEM loads are reachable.
-  SmallSet<MachineBasicBlock *, 16> PriorityLoweringBlocks;
+  SmallPtrSet<MachineBasicBlock *, 16> PriorityLoweringBlocks;
   for (MachineBasicBlock &MBB : MF) {
     if (MBBInfos[&MBB].MayReachVMEMLoad) {
       if (MBB.succ_empty())
diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index f018f77bc83e1..dce4e6f993005 100644
--- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -460,7 +460,7 @@ static bool hoistAndMergeSGPRInits(unsigned Reg,
   // List of clobbering instructions.
   SmallVector<MachineInstr*, 8> Clobbers;
   // List of instructions marked for deletion.
-  SmallSet<MachineInstr*, 8> MergedInstrs;
+  SmallPtrSet<MachineInstr *, 8> MergedInstrs;
 
   bool Changed = false;
 
@@ -808,7 +808,7 @@ bool SIFixSGPRCopies::run(MachineFunction &MF) {
 void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
   bool AllAGPRUses = true;
   SetVector<const MachineInstr *> worklist;
-  SmallSet<const MachineInstr *, 4> Visited;
+  SmallPtrSet<const MachineInstr *, 4> Visited;
   SetVector<MachineInstr *> PHIOperands;
   worklist.insert(&MI);
   Visited.insert(&MI);
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index f7a9a584a6b51..e97536d36bab2 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -81,7 +81,7 @@ class SILowerControlFlow {
   MachineRegisterInfo *MRI = nullptr;
   SetVector<MachineInstr*> LoweredEndCf;
   DenseSet<Register> LoweredIf;
-  SmallSet<MachineBasicBlock *, 4> KillBlocks;
+  SmallPtrSet<MachineBasicBlock *, 4> KillBlocks;
   SmallSet<Register, 8>...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/154068


More information about the llvm-commits mailing list