[llvm] 856f793 - Compress a few pairs using PointerIntPairs
Benjamin Kramer via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 4 07:55:27 PST 2022
Author: Benjamin Kramer
Date: 2022-12-04T16:55:16+01:00
New Revision: 856f7937c79592d8157bcf515162bf9ee02853cd
URL: https://github.com/llvm/llvm-project/commit/856f7937c79592d8157bcf515162bf9ee02853cd
DIFF: https://github.com/llvm/llvm-project/commit/856f7937c79592d8157bcf515162bf9ee02853cd.diff
LOG: Compress a few pairs using PointerIntPairs
Use the uniform structured bindings interface where possible. NFCI.
Added:
Modified:
llvm/include/llvm/Transforms/IPO/Attributor.h
llvm/lib/Analysis/LoopAccessAnalysis.cpp
llvm/lib/Analysis/ScalarEvolution.cpp
llvm/lib/Transforms/IPO/Attributor.cpp
llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index 11c3cdf160b82..0d02993d1f26b 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -1700,14 +1700,13 @@ struct Attributor {
}
Value &V = IRP.getAssociatedValue();
auto &Entry = ToBeChangedValues[&V];
- Value *&CurNV = Entry.first;
+ Value *CurNV = get<0>(Entry);
if (CurNV && (CurNV->stripPointerCasts() == NV.stripPointerCasts() ||
isa<UndefValue>(CurNV)))
return false;
assert((!CurNV || CurNV == &NV || isa<UndefValue>(NV)) &&
"Value replacement was registered twice with different values!");
- CurNV = &NV;
- Entry.second = ChangeDroppable;
+ Entry = {&NV, ChangeDroppable};
return true;
}
@@ -2265,7 +2264,8 @@ struct Attributor {
/// Values we replace with a new value after manifest is done. We will remove
/// then trivially dead instructions as well.
- SmallMapVector<Value *, std::pair<Value *, bool>, 32> ToBeChangedValues;
+ SmallMapVector<Value *, PointerIntPair<Value *, 1, bool>, 32>
+ ToBeChangedValues;
/// Instructions we replace with `unreachable` insts after manifest is done.
SmallSetVector<WeakVH, 16> ToBeChangedToUnreachableInsts;
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index b65db6e3aff75..56175ea397e7b 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -805,10 +805,10 @@ static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
// in by the caller. If we have a node that may potentially yield a valid
// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
// ourselves before adding to the list.
-static void
-findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
- SmallVectorImpl<std::pair<const SCEV *, bool>> &ScevList,
- unsigned Depth) {
+static void findForkedSCEVs(
+ ScalarEvolution *SE, const Loop *L, Value *Ptr,
+ SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
+ unsigned Depth) {
// If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
// we've exceeded our limit on recursion, just return whatever we have
// regardless of whether it can be used for a forked pointer or not, along
@@ -816,15 +816,14 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
const SCEV *Scev = SE->getSCEV(Ptr);
if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
!isa<Instruction>(Ptr) || Depth == 0) {
- ScevList.push_back(
- std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr)));
+ ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
return;
}
Depth--;
- auto UndefPoisonCheck = [](std::pair<const SCEV *, bool> S) -> bool {
- return S.second;
+ auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
+ return get<1>(S);
};
auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
@@ -847,12 +846,11 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
// We only handle base + single offset GEPs here for now.
// Not dealing with preexisting gathers yet, so no vectors.
if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
- ScevList.push_back(
- std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP)));
+ ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
break;
}
- SmallVector<std::pair<const SCEV *, bool>, 2> BaseScevs;
- SmallVector<std::pair<const SCEV *, bool>, 2> OffsetScevs;
+ SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
+ SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
@@ -868,7 +866,7 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
OffsetScevs.push_back(OffsetScevs[0]);
else {
- ScevList.push_back(std::make_pair(Scev, NeedsFreeze));
+ ScevList.emplace_back(Scev, NeedsFreeze);
break;
}
@@ -883,17 +881,17 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
// Scale up the offsets by the size of the type, then add to the bases.
const SCEV *Scaled1 = SE->getMulExpr(
- Size, SE->getTruncateOrSignExtend(OffsetScevs[0].first, IntPtrTy));
+ Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
const SCEV *Scaled2 = SE->getMulExpr(
- Size, SE->getTruncateOrSignExtend(OffsetScevs[1].first, IntPtrTy));
- ScevList.push_back(std::make_pair(
- SE->getAddExpr(BaseScevs[0].first, Scaled1), NeedsFreeze));
- ScevList.push_back(std::make_pair(
- SE->getAddExpr(BaseScevs[1].first, Scaled2), NeedsFreeze));
+ Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
+ ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
+ NeedsFreeze);
+ ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
+ NeedsFreeze);
break;
}
case Instruction::Select: {
- SmallVector<std::pair<const SCEV *, bool>, 2> ChildScevs;
+ SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
// A select means we've found a forked pointer, but we currently only
// support a single select per pointer so if there's another behind this
// then we just bail out and return the generic SCEV.
@@ -903,14 +901,13 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
ScevList.push_back(ChildScevs[0]);
ScevList.push_back(ChildScevs[1]);
} else
- ScevList.push_back(
- std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr)));
+ ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
break;
}
case Instruction::Add:
case Instruction::Sub: {
- SmallVector<std::pair<const SCEV *, bool>> LScevs;
- SmallVector<std::pair<const SCEV *, bool>> RScevs;
+ SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
+ SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
@@ -926,49 +923,49 @@ findForkedSCEVs(ScalarEvolution *SE, const Loop *L, Value *Ptr,
else if (RScevs.size() == 2 && LScevs.size() == 1)
LScevs.push_back(LScevs[0]);
else {
- ScevList.push_back(std::make_pair(Scev, NeedsFreeze));
+ ScevList.emplace_back(Scev, NeedsFreeze);
break;
}
- ScevList.push_back(std::make_pair(
- GetBinOpExpr(Opcode, LScevs[0].first, RScevs[0].first), NeedsFreeze));
- ScevList.push_back(std::make_pair(
- GetBinOpExpr(Opcode, LScevs[1].first, RScevs[1].first), NeedsFreeze));
+ ScevList.emplace_back(
+ GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
+ NeedsFreeze);
+ ScevList.emplace_back(
+ GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
+ NeedsFreeze);
break;
}
default:
// Just return the current SCEV if we haven't handled the instruction yet.
LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
- ScevList.push_back(
- std::make_pair(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr)));
+ ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
break;
}
}
-static SmallVector<std::pair<const SCEV *, bool>>
+static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
findForkedPointer(PredicatedScalarEvolution &PSE,
const ValueToValueMap &StridesMap, Value *Ptr,
const Loop *L) {
ScalarEvolution *SE = PSE.getSE();
assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
- SmallVector<std::pair<const SCEV *, bool>> Scevs;
+ SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
// For now, we will only accept a forked pointer with two possible SCEVs
// that are either SCEVAddRecExprs or loop invariant.
if (Scevs.size() == 2 &&
- (isa<SCEVAddRecExpr>(Scevs[0].first) ||
- SE->isLoopInvariant(Scevs[0].first, L)) &&
- (isa<SCEVAddRecExpr>(Scevs[1].first) ||
- SE->isLoopInvariant(Scevs[1].first, L))) {
+ (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
+ SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
+ (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
+ SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
- LLVM_DEBUG(dbgs() << "\t(1) " << *(Scevs[0].first) << "\n");
- LLVM_DEBUG(dbgs() << "\t(2) " << *(Scevs[1].first) << "\n");
+ LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
+ LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
return Scevs;
}
- return {
- std::make_pair(replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false)};
+ return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
}
bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
@@ -980,11 +977,11 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
bool Assume) {
Value *Ptr = Access.getPointer();
- SmallVector<std::pair<const SCEV *, bool>> TranslatedPtrs =
+ SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
for (auto &P : TranslatedPtrs) {
- const SCEV *PtrExpr = P.first;
+ const SCEV *PtrExpr = get<0>(P);
if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
return false;
@@ -1005,13 +1002,11 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
// If there's only one option for Ptr, look it up after bounds and wrap
// checking, because assumptions might have been added to PSE.
if (TranslatedPtrs.size() == 1)
- TranslatedPtrs[0] = std::make_pair(
- replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false);
+ TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
+ false};
}
- for (auto &P : TranslatedPtrs) {
- const SCEV *PtrExpr = P.first;
-
+ for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
// The id of the dependence set.
unsigned DepId;
@@ -1027,7 +1022,7 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
bool IsWrite = Access.getInt();
RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
- P.second);
+ NeedsFreeze);
LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
}
@@ -1830,10 +1825,8 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
const ValueToValueMap &Strides) {
assert (AIdx < BIdx && "Must pass arguments in program order");
- Value *APtr = A.getPointer();
- Value *BPtr = B.getPointer();
- bool AIsWrite = A.getInt();
- bool BIsWrite = B.getInt();
+ auto [APtr, AIsWrite] = A;
+ auto [BPtr, BIsWrite] = B;
Type *ATy = getLoadStoreType(InstMap[AIdx]);
Type *BTy = getLoadStoreType(InstMap[BIdx]);
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 2029e47eb1374..ad48de1755a3a 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -14110,12 +14110,8 @@ void ScalarEvolution::verify() const {
VerifyBECountUsers(/* Predicated */ true);
// Verify integrity of loop disposition cache.
- for (const auto &It : LoopDispositions) {
- const SCEV *S = It.first;
- auto &Values = It.second;
- for (auto &V : Values) {
- auto CachedDisposition = V.getInt();
- const auto *Loop = V.getPointer();
+ for (auto &[S, Values] : LoopDispositions) {
+ for (auto [Loop, CachedDisposition] : Values) {
const auto RecomputedDisposition = SE2.getLoopDisposition(S, Loop);
if (CachedDisposition != RecomputedDisposition) {
dbgs() << "Cached disposition of " << *S << " for loop " << *Loop
@@ -14128,12 +14124,8 @@ void ScalarEvolution::verify() const {
}
// Verify integrity of the block disposition cache.
- for (const auto &It : BlockDispositions) {
- const SCEV *S = It.first;
- auto &Values = It.second;
- for (auto &V : Values) {
- auto CachedDisposition = V.getInt();
- const BasicBlock *BB = V.getPointer();
+ for (auto &[S, Values] : BlockDispositions) {
+ for (auto [BB, CachedDisposition] : Values) {
const auto RecomputedDisposition = SE2.getBlockDisposition(S, BB);
if (CachedDisposition != RecomputedDisposition) {
dbgs() << "Cached disposition of " << *S << " for block %"
@@ -14944,7 +14936,7 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
}
};
- SmallVector<std::pair<Value *, bool>> Terms;
+ SmallVector<PointerIntPair<Value *, 1, bool>> Terms;
// First, collect information from assumptions dominating the loop.
for (auto &AssumeVH : AC.assumptions()) {
if (!AssumeVH)
@@ -14978,11 +14970,10 @@ const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
// processed first. This ensures the SCEVs with the shortest dependency chains
// are constructed first.
DenseMap<const SCEV *, const SCEV *> RewriteMap;
- for (auto &E : reverse(Terms)) {
- bool EnterIfTrue = E.second;
+ for (auto [Term, EnterIfTrue] : reverse(Terms)) {
SmallVector<Value *, 8> Worklist;
SmallPtrSet<Value *, 8> Visited;
- Worklist.push_back(E.first);
+ Worklist.push_back(Term);
while (!Worklist.empty()) {
Value *Cond = Worklist.pop_back_val();
if (!Visited.insert(Cond).second)
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index 227ccc96530bf..ebb5c47b31fdd 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -2026,9 +2026,9 @@ ChangeStatus Attributor::cleanupIR() {
// If we plan to replace NewV we need to update it at this point.
do {
const auto &Entry = ToBeChangedValues.lookup(NewV);
- if (!Entry.first)
+ if (!get<0>(Entry))
break;
- NewV = Entry.first;
+ NewV = get<0>(Entry);
} while (true);
Instruction *I = dyn_cast<Instruction>(U->getUser());
@@ -2092,11 +2092,10 @@ ChangeStatus Attributor::cleanupIR() {
SmallVector<Use *, 4> Uses;
for (auto &It : ToBeChangedValues) {
Value *OldV = It.first;
- auto &Entry = It.second;
- Value *NewV = Entry.first;
+ auto [NewV, Done] = It.second;
Uses.clear();
for (auto &U : OldV->uses())
- if (Entry.second || !U.getUser()->isDroppable())
+ if (Done || !U.getUser()->isDroppable())
Uses.push_back(&U);
for (Use *U : Uses) {
if (auto *I = dyn_cast<Instruction>(U->getUser()))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index c11510cc159b4..65074c87bdf30 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -46,12 +46,11 @@ isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
// ahead and replace the value with the memory location, this lets the caller
// quickly eliminate the markers.
- SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
+ SmallVector<PointerIntPair<Value *, 1, bool>, 35> ValuesToInspect;
ValuesToInspect.emplace_back(V, false);
while (!ValuesToInspect.empty()) {
- auto ValuePair = ValuesToInspect.pop_back_val();
- const bool IsOffset = ValuePair.second;
- for (auto &U : ValuePair.first->uses()) {
+ const auto [Value, IsOffset] = ValuesToInspect.pop_back_val();
+ for (auto &U : Value->uses()) {
auto *I = cast<Instruction>(U.getUser());
if (auto *LI = dyn_cast<LoadInst>(I)) {
More information about the llvm-commits
mailing list