[llvm] 0d8ffcc - Analysis: Add AssumptionCache argument to isDereferenceableAndAlignedPointer
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 19 15:57:44 PDT 2022
Author: Matt Arsenault
Date: 2022-09-19T18:57:33-04:00
New Revision: 0d8ffcc532ebd1ec5c7f69313be53274d140f72f
URL: https://github.com/llvm/llvm-project/commit/0d8ffcc532ebd1ec5c7f69313be53274d140f72f
DIFF: https://github.com/llvm/llvm-project/commit/0d8ffcc532ebd1ec5c7f69313be53274d140f72f.diff
LOG: Analysis: Add AssumptionCache argument to isDereferenceableAndAlignedPointer
This does not yet try to thread the AssumptionCache through from end
users; call sites without a cache available pass nullptr (some marked
FIXME) to preserve existing behavior.
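In each signature the new AssumptionCache parameter slots in between the
context instruction and the dominator tree. A minimal sketch of a call
site that forwards a cache follows; the helper itself is illustrative,
and only the isDereferenceableAndAlignedPointer signature is taken from
this patch:

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Illustrative helper: query whether a load is known dereferenceable and
// aligned, forwarding an AssumptionCache so getKnowledgeForValue() can
// consult cached align/dereferenceable assume bundles for the pointer.
static bool isLoadKnownDereferenceable(const LoadInst &LI,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  const DataLayout &DL = LI.getModule()->getDataLayout();
  // AC sits between CtxI and DT; passing nullptr keeps the pre-patch
  // behavior of scanning uses instead of the cache.
  return isDereferenceableAndAlignedPointer(LI.getPointerOperand(),
                                            LI.getType(), LI.getAlign(), DL,
                                            &LI, AC, DT, TLI);
}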
Added:
Modified:
llvm/include/llvm/Analysis/Loads.h
llvm/lib/Analysis/Loads.cpp
llvm/lib/Analysis/ValueTracking.cpp
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/Transforms/Scalar/LICM.cpp
llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
llvm/lib/Transforms/Utils/LoopPeel.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/Loads.h b/llvm/include/llvm/Analysis/Loads.h
index 29e3efb38e195..5ea51c2487c8f 100644
--- a/llvm/include/llvm/Analysis/Loads.h
+++ b/llvm/include/llvm/Analysis/Loads.h
@@ -19,6 +19,7 @@
namespace llvm {
class AAResults;
+class AssumptionCache;
class DataLayout;
class DominatorTree;
class Instruction;
@@ -31,9 +32,9 @@ class TargetLibraryInfo;
/// Return true if this is always a dereferenceable pointer. If the context
/// instruction is specified perform context-sensitive analysis and return true
/// if the pointer is dereferenceable at the specified instruction.
-bool isDereferenceablePointer(const Value *V, Type *Ty,
- const DataLayout &DL,
+bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL,
const Instruction *CtxI = nullptr,
+ AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
@@ -44,6 +45,7 @@ bool isDereferenceablePointer(const Value *V, Type *Ty,
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
Align Alignment, const DataLayout &DL,
const Instruction *CtxI = nullptr,
+ AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
@@ -54,6 +56,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
const APInt &Size, const DataLayout &DL,
const Instruction *CtxI = nullptr,
+ AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr,
const TargetLibraryInfo *TLI = nullptr);
@@ -79,8 +82,8 @@ bool isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
/// if desired.) This is more powerful than the variants above when the
/// address loaded from is analyzeable by SCEV.
bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
- ScalarEvolution &SE,
- DominatorTree &DT);
+ ScalarEvolution &SE, DominatorTree &DT,
+ AssumptionCache *AC = nullptr);
/// Return true if we know that executing a load from this value cannot trap.
///
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 938d950e6da79..5a693c27bc421 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -38,7 +38,7 @@ static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
- const Instruction *CtxI, const DominatorTree *DT,
+ const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
unsigned MaxDepth) {
assert(V->getType()->isPointerTy() && "Base must be pointer");
@@ -57,19 +57,19 @@ static bool isDereferenceableAndAlignedPointer(
// Recurse into both hands of select.
if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
- Size, DL, CtxI, DT, TLI, Visited,
- MaxDepth) &&
+ Size, DL, CtxI, AC, DT, TLI,
+ Visited, MaxDepth) &&
isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
- Size, DL, CtxI, DT, TLI, Visited,
- MaxDepth);
+ Size, DL, CtxI, AC, DT, TLI,
+ Visited, MaxDepth);
}
// bitcast instructions are no-ops as far as dereferenceability is concerned.
if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
if (BC->getSrcTy()->isPointerTy())
- return isDereferenceableAndAlignedPointer(
- BC->getOperand(0), Alignment, Size, DL, CtxI, DT, TLI,
- Visited, MaxDepth);
+ return isDereferenceableAndAlignedPointer(BC->getOperand(0), Alignment,
+ Size, DL, CtxI, AC, DT, TLI,
+ Visited, MaxDepth);
}
bool CheckForNonNull, CheckForFreed;
@@ -94,7 +94,7 @@ static bool isDereferenceableAndAlignedPointer(
RetainedKnowledge AlignRK;
RetainedKnowledge DerefRK;
if (getKnowledgeForValue(
- V, {Attribute::Dereferenceable, Attribute::Alignment}, nullptr,
+ V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
[&](RetainedKnowledge RK, Instruction *Assume, auto) {
if (!isValidAssumeForContext(Assume, CtxI))
return false;
@@ -133,24 +133,24 @@ static bool isDereferenceableAndAlignedPointer(
// addrspacecast, so we can't do arithmetic directly on the APInt values.
return isDereferenceableAndAlignedPointer(
Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
- CtxI, DT, TLI, Visited, MaxDepth);
+ CtxI, AC, DT, TLI, Visited, MaxDepth);
}
// For gc.relocate, look through relocations
if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
- Alignment, Size, DL, CtxI, DT,
+ Alignment, Size, DL, CtxI, AC, DT,
TLI, Visited, MaxDepth);
if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
- Size, DL, CtxI, DT, TLI,
+ Size, DL, CtxI, AC, DT, TLI,
Visited, MaxDepth);
if (const auto *Call = dyn_cast<CallBase>(V)) {
if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
- DT, TLI, Visited, MaxDepth);
+ AC, DT, TLI, Visited, MaxDepth);
// If we have a call we can't recurse through, check to see if this is an
// allocation function for which we can establish a minimum object size.
@@ -173,7 +173,7 @@ static bool isDereferenceableAndAlignedPointer(
// As we recursed through GEPs to get here, we've incrementally
// checked that each step advanced by a multiple of the alignment. If
// our base is properly aligned, then the original offset accessed
- // must also be.
+ // must also be.
Type *Ty = V->getType();
assert(Ty->isSized() && "must be sized");
APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
@@ -186,28 +186,24 @@ static bool isDereferenceableAndAlignedPointer(
return false;
}
-bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
- const APInt &Size,
- const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
+bool llvm::isDereferenceableAndAlignedPointer(
+ const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
+ const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
// Note: At the moment, Size can be zero. This ends up being interpreted as
// a query of whether [Base, V] is dereferenceable and V is aligned (since
// that's what the implementation happened to do). It's unclear if this is
// the desired semantic, but at least SelectionDAG does exercise this case.
SmallPtrSet<const Value *, 32> Visited;
- return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT,
- TLI, Visited, 16);
+ return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
+ DT, TLI, Visited, 16);
}
-bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
- Align Alignment,
- const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
+bool llvm::isDereferenceableAndAlignedPointer(
+ const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
+ const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
// For unsized types or scalable vectors we don't know exactly how many bytes
// are dereferenced, so bail out.
if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
@@ -221,15 +217,17 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
DL.getTypeStoreSize(Ty));
return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
- DT, TLI);
+ AC, DT, TLI);
}
bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
const DataLayout &DL,
const Instruction *CtxI,
+ AssumptionCache *AC,
const DominatorTree *DT,
const TargetLibraryInfo *TLI) {
- return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, DT, TLI);
+ return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
+ TLI);
}
/// Test if A and B will obviously have the same value.
@@ -265,7 +263,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
ScalarEvolution &SE,
- DominatorTree &DT) {
+ DominatorTree &DT,
+ AssumptionCache *AC) {
auto &DL = LI->getModule()->getDataLayout();
Value *Ptr = LI->getPointerOperand();
@@ -279,7 +278,7 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
// access is safe within the loop w/o needing predication.
if (L->isLoopInvariant(Ptr))
return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
- HeaderFirstNonPHI, &DT);
+ HeaderFirstNonPHI, AC, &DT);
// Otherwise, check to see if we have a repeating access pattern where we can
// prove that all accesses are well aligned and dereferenceable.
@@ -311,7 +310,7 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
if (EltSize.urem(Alignment.value()) != 0)
return false;
return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
- HeaderFirstNonPHI, &DT);
+ HeaderFirstNonPHI, AC, &DT);
}
/// Check if executing a load of this pointer value cannot trap.
@@ -332,7 +331,8 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
const TargetLibraryInfo *TLI) {
// If DT is not specified we can't make context-sensitive query
const Instruction* CtxI = DT ? ScanFrom : nullptr;
- if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT, TLI))
+ if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, nullptr,
+ DT, TLI))
return true;
if (!ScanFrom)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 5f22714f40890..7c0e0e641db55 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -4785,8 +4785,9 @@ bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
return isDereferenceableAndAlignedPointer(
- LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL, CtxI, DT,
- TLI);
+ LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL, CtxI,
+ nullptr, // FIXME
+ DT, TLI);
}
case Instruction::Call: {
auto *CI = dyn_cast<const CallInst>(Inst);
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 9a4f2d145bdf8..714f563894d67 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1313,7 +1313,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
if (!(Flags & MachineMemOperand::MODereferenceable)) {
if (isDereferenceableAndAlignedPointer(Ptr, LI.getType(), LI.getAlign(),
- *DL, &LI, nullptr, LibInfo))
+ *DL, &LI, nullptr, nullptr, LibInfo))
Flags |= MachineMemOperand::MODereferenceable;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 14bd53b83e806..89e23f133005c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4139,7 +4139,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
}
if (isDereferenceableAndAlignedPointer(SV, Ty, Alignment, DAG.getDataLayout(),
- &I, nullptr, LibInfo))
+ &I, nullptr, nullptr, LibInfo))
MMOFlags |= MachineMemOperand::MODereferenceable;
SDLoc dl = getCurSDLoc();
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 120dd2fa2b098..cdfdfbd4ffe83 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -2090,7 +2090,9 @@ bool llvm::promoteLoopAccessesToScalars(
if (!DereferenceableInPH) {
DereferenceableInPH = isDereferenceableAndAlignedPointer(
Store->getPointerOperand(), Store->getValueOperand()->getType(),
- Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
+ Store->getAlign(), MDL, Preheader->getTerminator(),
+ nullptr, // FIXME
+ DT, TLI);
}
} else
continue; // Not a load or store.
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 1f5bc69acecd0..fd10f23dec5b1 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -920,12 +920,11 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
// trap. Otherwise the transform is invalid since it might cause a trap
// to occur earlier than it otherwise would.
if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize),
- DL, C, DT)) {
+ DL, C, nullptr, DT)) {
LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer not dereferenceable\n");
return false;
}
-
// Make sure that nothing can observe cpyDest being written early. There are
// a number of cases to consider:
// 1. cpyDest cannot be accessed between C and cpyStore as a precondition of
diff --git a/llvm/lib/Transforms/Utils/LoopPeel.cpp b/llvm/lib/Transforms/Utils/LoopPeel.cpp
index 87b4ce06c1fb1..16007102033f8 100644
--- a/llvm/lib/Transforms/Utils/LoopPeel.cpp
+++ b/llvm/lib/Transforms/Utils/LoopPeel.cpp
@@ -202,7 +202,7 @@ static unsigned peelToTurnInvariantLoadsDerefencebale(Loop &L,
if (auto *LI = dyn_cast<LoadInst>(&I)) {
Value *Ptr = LI->getPointerOperand();
if (DT.dominates(BB, Latch) && L.isLoopInvariant(Ptr) &&
- !isDereferenceablePointer(Ptr, LI->getType(), DL, LI, &DT))
+ !isDereferenceablePointer(Ptr, LI->getType(), DL, LI, nullptr, &DT))
for (Value *U : I.users())
LoadUsers.insert(U);
}