[clang] 3badd17 - SmallPtrSet::find -> SmallPtrSet::count
Benjamin Kramer via cfe-commits
cfe-commits at lists.llvm.org
Sun Jun 7 13:38:19 PDT 2020
Author: Benjamin Kramer
Date: 2020-06-07T22:38:08+02:00
New Revision: 3badd17b6989621b5aa2732800f697dabbda034d
URL: https://github.com/llvm/llvm-project/commit/3badd17b6989621b5aa2732800f697dabbda034d
DIFF: https://github.com/llvm/llvm-project/commit/3badd17b6989621b5aa2732800f697dabbda034d.diff
LOG: SmallPtrSet::find -> SmallPtrSet::count
The latter is more readable and more efficient. While there, clean up
some double lookups. NFCI.
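
For reference, a minimal standalone sketch of the three idioms this commit rewrites
(the helper names isTracked, markVisited and removeIfPresent are hypothetical and
exist only to illustrate the pattern; only the SmallPtrSet calls come from the
actual change):

#include "llvm/ADT/SmallPtrSet.h"

using namespace llvm;

// Membership test: count() states the intent directly and avoids
// materializing an iterator just to compare it against end().
bool isTracked(const SmallPtrSet<const int *, 4> &Set, const int *P) {
  // Before: return Set.find(P) != Set.end();
  return Set.count(P);
}

// Double-lookup cleanup: insert() already reports via the bool in the
// returned pair whether the element was newly inserted, so a separate
// find() beforehand is redundant.
bool markVisited(SmallPtrSet<const int *, 4> &Visited, const int *P) {
  // Before:
  //   if (Visited.find(P) != Visited.end())
  //     return false;
  //   Visited.insert(P);
  //   return true;
  return Visited.insert(P).second;
}

// Same idea for erase(): it returns true iff the element was present,
// replacing a find()-then-erase() pair.
bool removeIfPresent(SmallPtrSet<const int *, 4> &Set, const int *P) {
  // Before:
  //   if (Set.find(P) == Set.end())
  //     return false;
  //   Set.erase(P);
  //   return true;
  return Set.erase(P);
}
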
Added:
Modified:
clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
llvm/lib/Analysis/DependenceGraphBuilder.cpp
llvm/lib/Analysis/StackSafetyAnalysis.cpp
llvm/lib/IR/Verifier.cpp
llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
llvm/lib/Transforms/Scalar/LoopInterchange.cpp
llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
llvm/lib/Transforms/Utils/Local.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/tools/bugpoint/CrashDebugger.cpp
Removed:
################################################################################
diff --git a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 9b6369aee7a8..ed62778623a8 100644
--- a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -973,9 +973,8 @@ static std::string getMacroNameAndPrintExpansion(
// in this case we don't get the full expansion text in the Plist file. See
// the test file where "value" is expanded to "garbage_" instead of
// "garbage_value".
- if (AlreadyProcessedTokens.find(IDInfo) != AlreadyProcessedTokens.end())
+ if (!AlreadyProcessedTokens.insert(IDInfo).second)
return Info.Name;
- AlreadyProcessedTokens.insert(IDInfo);
if (!Info.MI)
return Info.Name;
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
index 2a21e5e0d7b9..27efad214476 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangASTImporter.cpp
@@ -1025,7 +1025,7 @@ void ClangASTImporter::ASTImporterDelegate::Imported(clang::Decl *from,
// Some decls shouldn't be tracked here because they were not created by
// copying 'from' to 'to'. Just exit early for those.
- if (m_decls_to_ignore.find(to) != m_decls_to_ignore.end())
+ if (m_decls_to_ignore.count(to))
return clang::ASTImporter::Imported(from, to);
// Transfer module ownership information.
diff --git a/llvm/lib/Analysis/DependenceGraphBuilder.cpp b/llvm/lib/Analysis/DependenceGraphBuilder.cpp
index 67304a495960..7a98d844e4cb 100644
--- a/llvm/lib/Analysis/DependenceGraphBuilder.cpp
+++ b/llvm/lib/Analysis/DependenceGraphBuilder.cpp
@@ -435,9 +435,8 @@ template <class G> void AbstractDependenceGraphBuilder<G>::simplify() {
NodeType &Src = *Worklist.pop_back_val();
// As nodes get merged, we need to skip any node that has been removed from
// the candidate set (see below).
- if (CandidateSourceNodes.find(&Src) == CandidateSourceNodes.end())
+ if (!CandidateSourceNodes.erase(&Src))
continue;
- CandidateSourceNodes.erase(&Src);
assert(Src.getEdges().size() == 1 &&
"Expected a single edge from the candidate src node.");
@@ -470,10 +469,9 @@ template <class G> void AbstractDependenceGraphBuilder<G>::simplify() {
// We also need to remove the old target (b), from the worklist. We first
// remove it from the candidate set here, and skip any item from the
// worklist that is not in the set.
- if (CandidateSourceNodes.find(&Tgt) != CandidateSourceNodes.end()) {
+ if (CandidateSourceNodes.erase(&Tgt)) {
Worklist.push_back(&Src);
CandidateSourceNodes.insert(&Src);
- CandidateSourceNodes.erase(&Tgt);
LLVM_DEBUG(dbgs() << "Putting " << &Src << " back in the worklist.\n");
}
}
diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
index dd4ba86df7ca..9d96e9d5d075 100644
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -684,7 +684,7 @@ StackSafetyGlobalInfo::~StackSafetyGlobalInfo() = default;
bool StackSafetyGlobalInfo::isSafe(const AllocaInst &AI) const {
const auto &Info = getInfo();
- return Info.SafeAllocas.find(&AI) != Info.SafeAllocas.end();
+ return Info.SafeAllocas.count(&AI);
}
void StackSafetyGlobalInfo::print(raw_ostream &O) const {
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 1c9023a349c2..eae7c03d744e 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -2613,8 +2613,7 @@ void Verifier::visitCallBrInst(CallBrInst &CBI) {
if (auto *BA = dyn_cast<BlockAddress>(V))
ArgBBs.insert(BA->getBasicBlock());
for (BasicBlock *BB : CBI.getIndirectDests())
- Assert(ArgBBs.find(BB) != ArgBBs.end(),
- "Indirect label missing from arglist.", &CBI);
+ Assert(ArgBBs.count(BB), "Indirect label missing from arglist.", &CBI);
}
visitTerminator(CBI);
diff --git a/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
index 0dc7423043cb..884b2e17289c 100644
--- a/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNMinRegStrategy.cpp
@@ -214,9 +214,8 @@ void GCNMinRegScheduler::bumpPredsPriority(const SUnit *SchedSU, int Priority) {
LLVM_DEBUG(dbgs() << "Make the predecessors of SU(" << SchedSU->NodeNum
<< ")'s non-ready successors of " << Priority
<< " priority in ready queue: ");
- const auto SetEnd = Set.end();
for (auto &C : RQ) {
- if (Set.find(C.SU) != SetEnd) {
+ if (Set.count(C.SU)) {
C.Priority = Priority;
LLVM_DEBUG(dbgs() << " SU(" << C.SU->NodeNum << ')');
}
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index bf5ba7294d86..94411a895c69 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1792,7 +1792,7 @@ struct DSEState {
return false;
if (SI->getParent() == NI->getParent())
- return ThrowingBlocks.find(SI->getParent()) != ThrowingBlocks.end();
+ return ThrowingBlocks.count(SI->getParent());
return !ThrowingBlocks.empty();
}
diff --git a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
index 15addc057dd9..7787c0bccd4c 100644
--- a/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -694,7 +694,7 @@ bool LoopInterchangeLegality::findInductionAndReductions(
// PHIs in inner loops need to be part of a reduction in the outer loop,
// discovered when checking the PHIs of the outer loop earlier.
if (!InnerLoop) {
- if (OuterInnerReductions.find(&PHI) == OuterInnerReductions.end()) {
+ if (!OuterInnerReductions.count(&PHI)) {
LLVM_DEBUG(dbgs() << "Inner loop PHI is not part of reductions "
"across the outer loop.\n");
return false;
@@ -908,8 +908,8 @@ areInnerLoopExitPHIsSupported(Loop *InnerL, Loop *OuterL,
return false;
if (any_of(PHI.users(), [&Reductions, OuterL](User *U) {
PHINode *PN = dyn_cast<PHINode>(U);
- return !PN || (Reductions.find(PN) == Reductions.end() &&
- OuterL->contains(PN->getParent()));
+ return !PN ||
+ (!Reductions.count(PN) && OuterL->contains(PN->getParent()));
})) {
return false;
}
@@ -1582,13 +1582,11 @@ bool LoopInterchangeTransform::adjustLoopBranches() {
// outer loop and all the remains to do is and updating the incoming blocks.
for (PHINode *PHI : OuterLoopPHIs) {
PHI->moveBefore(InnerLoopHeader->getFirstNonPHI());
- assert(OuterInnerReductions.find(PHI) != OuterInnerReductions.end() &&
- "Expected a reduction PHI node");
+ assert(OuterInnerReductions.count(PHI) && "Expected a reduction PHI node");
}
for (PHINode *PHI : InnerLoopPHIs) {
PHI->moveBefore(OuterLoopHeader->getFirstNonPHI());
- assert(OuterInnerReductions.find(PHI) != OuterInnerReductions.end() &&
- "Expected a reduction PHI node");
+ assert(OuterInnerReductions.count(PHI) && "Expected a reduction PHI node");
}
// Update the incoming blocks for moved PHI nodes.
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 801069bb97b4..65f88b68fd07 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -705,7 +705,7 @@ class LowerMatrixIntrinsics {
// Third, lower remaining instructions with shape information.
for (Instruction *Inst : MatrixInsts) {
- if (FusedInsts.find(Inst) != FusedInsts.end())
+ if (FusedInsts.count(Inst))
continue;
IRBuilder<> Builder(Inst);
@@ -1593,7 +1593,7 @@ class LowerMatrixIntrinsics {
// Deal with shared subtrees. Mark them as shared, if required.
if (!ParentShared) {
auto SI = Shared.find(Expr);
- assert(SI != Shared.end() && SI->second.find(Leaf) != SI->second.end());
+ assert(SI != Shared.end() && SI->second.count(Leaf));
for (Value *S : SI->second) {
if (S == Leaf)
diff --git a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index 9a6761040bd8..77ce60c3f5a3 100644
--- a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -213,9 +213,8 @@ bool FastDivInsertionTask::isHashLikeValue(Value *V, VisitedSetTy &Visited) {
return false;
// Do not visit nodes that have been visited already. We return true because
// it means that we couldn't find any value that doesn't look hash-like.
- if (Visited.find(I) != Visited.end())
+ if (!Visited.insert(I).second)
return true;
- Visited.insert(I);
return llvm::all_of(cast<PHINode>(I)->incoming_values(), [&](Value *V) {
// Ignore undef values as they probably don't affect the division
// operands.
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index f0df08251a01..56b58c469756 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -2260,7 +2260,7 @@ bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
SmallSetVector<BasicBlock *, 8> DeadBlockSet;
for (BasicBlock &BB : F) {
// Skip reachable basic blocks
- if (Reachable.find(&BB) != Reachable.end())
+ if (Reachable.count(&BB))
continue;
DeadBlockSet.insert(&BB);
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 4f40d8d529f3..81c81e441d7e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1070,7 +1070,7 @@ class LoopVectorizationCostModel {
auto UniformsPerVF = Uniforms.find(VF);
assert(UniformsPerVF != Uniforms.end() &&
"VF not yet analyzed for uniformity");
- return UniformsPerVF->second.find(I) != UniformsPerVF->second.end();
+ return UniformsPerVF->second.count(I);
}
/// Returns true if \p I is known to be scalar after vectorization.
@@ -1086,7 +1086,7 @@ class LoopVectorizationCostModel {
auto ScalarsPerVF = Scalars.find(VF);
assert(ScalarsPerVF != Scalars.end() &&
"Scalar values are not calculated for VF");
- return ScalarsPerVF->second.find(I) != ScalarsPerVF->second.end();
+ return ScalarsPerVF->second.count(I);
}
/// \returns True if instruction \p I can be truncated to a smaller bitwidth
@@ -3354,8 +3354,7 @@ void InnerLoopVectorizer::truncateToMinimalBitwidths() {
continue;
for (unsigned Part = 0; Part < UF; ++Part) {
Value *I = getOrCreateVectorValue(KV.first, Part);
- if (Erased.find(I) != Erased.end() || I->use_empty() ||
- !isa<Instruction>(I))
+ if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
continue;
Type *OriginalTy = I->getType();
Type *ScalarTruncatedTy =
@@ -4532,7 +4531,7 @@ void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
}
}
for (auto *I : ScalarPtrs)
- if (PossibleNonScalarPtrs.find(I) == PossibleNonScalarPtrs.end()) {
+ if (!PossibleNonScalarPtrs.count(I)) {
LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
Worklist.insert(I);
}
@@ -4838,7 +4837,7 @@ void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
// Add to the Worklist all consecutive and consecutive-like pointers that
// aren't also identified as possibly non-uniform.
for (auto *V : ConsecutiveLikePtrs)
- if (PossibleNonUniformPtrs.find(V) == PossibleNonUniformPtrs.end())
+ if (!PossibleNonUniformPtrs.count(V))
addToWorklistIfAllowed(V);
// Expand Worklist in topological order: whenever a new instruction
@@ -5185,7 +5184,7 @@ LoopVectorizationCostModel::getSmallestAndWidestTypes() {
Type *T = I.getType();
// Skip ignored values.
- if (ValuesToIgnore.find(&I) != ValuesToIgnore.end())
+ if (ValuesToIgnore.count(&I))
continue;
// Only examine Loads, Stores and PHINodes.
@@ -5504,11 +5503,11 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
OpenIntervals.erase(ToRemove);
// Ignore instructions that are never used within the loop.
- if (Ends.find(I) == Ends.end())
+ if (!Ends.count(I))
continue;
// Skip ignored values.
- if (ValuesToIgnore.find(I) != ValuesToIgnore.end())
+ if (ValuesToIgnore.count(I))
continue;
// For each VF find the maximum usage of registers.
@@ -5528,7 +5527,7 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
collectUniformsAndScalars(VFs[j]);
for (auto Inst : OpenIntervals) {
// Skip ignored values for VF > 1.
- if (VecValuesToIgnore.find(Inst) != VecValuesToIgnore.end())
+ if (VecValuesToIgnore.count(Inst))
continue;
if (isScalarAfterVectorization(Inst, VFs[j])) {
unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
@@ -5766,8 +5765,7 @@ LoopVectorizationCostModel::expectedCost(unsigned VF) {
// For each instruction in the old loop.
for (Instruction &I : BB->instructionsWithoutDebug()) {
// Skip ignored values.
- if (ValuesToIgnore.find(&I) != ValuesToIgnore.end() ||
- (VF > 1 && VecValuesToIgnore.find(&I) != VecValuesToIgnore.end()))
+ if (ValuesToIgnore.count(&I) || (VF > 1 && VecValuesToIgnore.count(&I)))
continue;
VectorizationCostTy C = getInstructionCost(&I, VF);
@@ -6011,7 +6009,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
auto ForcedScalar = ForcedScalars.find(VF);
if (VF > 1 && ForcedScalar != ForcedScalars.end()) {
auto InstSet = ForcedScalar->second;
- if (InstSet.find(I) != InstSet.end())
+ if (InstSet.count(I))
return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
}
@@ -6231,10 +6229,8 @@ unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
bool ScalarPredicatedBB = false;
BranchInst *BI = cast<BranchInst>(I);
if (VF > 1 && BI->isConditional() &&
- (PredicatedBBsAfterVectorization.find(BI->getSuccessor(0)) !=
- PredicatedBBsAfterVectorization.end() ||
- PredicatedBBsAfterVectorization.find(BI->getSuccessor(1)) !=
- PredicatedBBsAfterVectorization.end()))
+ (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
+ PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
ScalarPredicatedBB = true;
if (ScalarPredicatedBB) {
@@ -6673,8 +6669,7 @@ void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
PHINode *Ind = Induction.first;
auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
- return U == Ind || DeadInstructions.find(cast<Instruction>(U)) !=
- DeadInstructions.end();
+ return U == Ind || DeadInstructions.count(cast<Instruction>(U));
}))
DeadInstructions.insert(IndUpdate);
@@ -7291,8 +7286,7 @@ VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
// First filter out irrelevant instructions, to ensure no recipes are
// built for them.
- if (isa<BranchInst>(Instr) ||
- DeadInstructions.find(Instr) != DeadInstructions.end())
+ if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
continue;
if (auto Recipe =
diff --git a/llvm/tools/bugpoint/CrashDebugger.cpp b/llvm/tools/bugpoint/CrashDebugger.cpp
index 962873ea6da1..1a39ff654f05 100644
--- a/llvm/tools/bugpoint/CrashDebugger.cpp
+++ b/llvm/tools/bugpoint/CrashDebugger.cpp
@@ -864,7 +864,7 @@ bool ReduceCrashingMetadata::TestInsts(std::vector<Instruction *> &Insts) {
// selected in Instructions.
for (Function &F : *M)
for (Instruction &Inst : instructions(F)) {
- if (Instructions.find(&Inst) == Instructions.end()) {
+ if (!Instructions.count(&Inst)) {
Inst.dropUnknownNonDebugMetadata();
Inst.setDebugLoc({});
}