[llvm] 393b9e9 - [MemLoc] Require LocationSize argument (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 19 12:46:18 PST 2020
Author: Nikita Popov
Date: 2020-11-19T21:45:52+01:00
New Revision: 393b9e9db31a3f83bc8b813ee24b56bc8ed93a49
URL: https://github.com/llvm/llvm-project/commit/393b9e9db31a3f83bc8b813ee24b56bc8ed93a49
DIFF: https://github.com/llvm/llvm-project/commit/393b9e9db31a3f83bc8b813ee24b56bc8ed93a49.diff
LOG: [MemLoc] Require LocationSize argument (NFC)
When constructing a MemoryLocation by hand, require that a
LocationSize is explicitly specified. D91649 will split up
LocationSize::unknown() into two different states, and callers
should make an explicit choice regarding the kind of MemoryLocation
they want to have.
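As a rough illustration (not part of the commit; Ptr and LI below are
placeholder variables), a hand-constructed location changes from

    MemoryLocation Loc(Ptr);   // size was silently defaulted to unknown()

to

    MemoryLocation Loc(Ptr, LocationSize::unknown());   // size stated explicitly

while locations for instructions whose access size is known are still best
obtained through the existing helpers, e.g.

    MemoryLocation Loc = MemoryLocation::get(LI);   // LoadInst *LI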
Added:
Modified:
llvm/include/llvm/Analysis/AliasAnalysis.h
llvm/include/llvm/Analysis/MemoryLocation.h
llvm/lib/Analysis/AliasAnalysis.cpp
llvm/lib/Analysis/BasicAliasAnalysis.cpp
llvm/lib/Analysis/GlobalsModRef.cpp
llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
llvm/lib/Analysis/MemorySSA.cpp
llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
index 35ee7f4841ee..6e642d4e6f4c 100644
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -424,7 +424,8 @@ class AAResults {
/// A convenience wrapper around the \c isNoAlias helper interface.
bool isNoAlias(const Value *V1, const Value *V2) {
- return isNoAlias(MemoryLocation(V1), MemoryLocation(V2));
+ return isNoAlias(MemoryLocation(V1, LocationSize::unknown()),
+ MemoryLocation(V2, LocationSize::unknown()));
}
/// A trivial helper function to check to see if the specified pointers are
@@ -446,7 +447,8 @@ class AAResults {
/// A convenience wrapper around the primary \c pointsToConstantMemory
/// interface.
bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
- return pointsToConstantMemory(MemoryLocation(P), OrLocal);
+ return pointsToConstantMemory(MemoryLocation(P, LocationSize::unknown()),
+ OrLocal);
}
/// @}
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
index d01ac7da85cd..d838f690d894 100644
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -248,8 +248,9 @@ class MemoryLocation {
return T.isScalable() ? UnknownSize : T.getFixedSize();
}
- explicit MemoryLocation(const Value *Ptr = nullptr,
- LocationSize Size = LocationSize::unknown(),
+ MemoryLocation() : Ptr(nullptr), Size(LocationSize::unknown()), AATags() {}
+
+ explicit MemoryLocation(const Value *Ptr, LocationSize Size,
const AAMDNodes &AATags = AAMDNodes())
: Ptr(Ptr), Size(Size), AATags(AATags) {}
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 515aecdf1e8d..c3d15655ab64 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -668,7 +668,7 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
!Call->isByValArgument(ArgNo)))
continue;
- AliasResult AR = alias(MemoryLocation(*CI), MemoryLocation(Object));
+ AliasResult AR = alias(*CI, Object);
// If this is a no-capture pointer argument, see if we can tell that it
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index c2ea4f5be9f7..2095702e8ae8 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -875,8 +875,9 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
// If this is a no-capture pointer argument, see if we can tell that it
// is impossible to alias the pointer we're checking.
- AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
- MemoryLocation(Object), AAQI);
+ AliasResult AR = getBestAAResults().alias(
+ MemoryLocation(*CI, LocationSize::unknown()),
+ MemoryLocation(Object, LocationSize::unknown()), AAQI);
if (AR != MustAlias)
IsMustAlias = false;
// Operand doesn't alias 'Object', continue looking for other aliases
@@ -922,7 +923,8 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
if (isMallocOrCallocLikeFn(Call, &TLI)) {
// Be conservative if the accessed pointer may alias the allocation -
// fallback to the generic handling below.
- if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
+ if (getBestAAResults().alias(MemoryLocation(Call, LocationSize::unknown()),
+ Loc, AAQI) == NoAlias)
return ModRefInfo::NoModRef;
}
diff --git a/llvm/lib/Analysis/GlobalsModRef.cpp b/llvm/lib/Analysis/GlobalsModRef.cpp
index 34669d0c662b..37a345885b33 100644
--- a/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -921,8 +921,9 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(const CallBase *Call,
if (!all_of(Objects, isIdentifiedObject) &&
// Try ::alias to see if all objects are known not to alias GV.
!all_of(Objects, [&](const Value *V) {
- return this->alias(MemoryLocation(V), MemoryLocation(GV), AAQI) ==
- NoAlias;
+ return this->alias(MemoryLocation(V, LocationSize::unknown()),
+ MemoryLocation(GV, LocationSize::unknown()),
+ AAQI) == NoAlias;
}))
return ConservativeResult;
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index a19c1d78526b..ab0105906682 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -148,7 +148,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
// calls to free() deallocate the entire structure
- Loc = MemoryLocation(CI->getArgOperand(0));
+ Loc = MemoryLocation(CI->getArgOperand(0), LocationSize::unknown());
return ModRefInfo::Mod;
}
@@ -450,14 +450,16 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// because the value is undefined.
Intrinsic::ID ID = II->getIntrinsicID();
switch (ID) {
- case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_start: {
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point (the right approach is to use
// GetPointerBaseWithConstantOffset).
- if (BatchAA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
+ MemoryLocation ArgLoc(II->getArgOperand(1), LocationSize::unknown());
+ if (BatchAA.isMustAlias(ArgLoc, MemLoc))
return MemDepResult::getDef(II);
continue;
+ }
case Intrinsic::masked_load:
case Intrinsic::masked_store: {
MemoryLocation Loc;
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index 6f6f730a92de..0d0d990df4e7 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -362,8 +362,10 @@ static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
Instruction *Inst = MD->getMemoryInst();
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
switch (II->getIntrinsicID()) {
- case Intrinsic::lifetime_end:
- return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
+ case Intrinsic::lifetime_end: {
+ MemoryLocation ArgLoc(II->getArgOperand(1), LocationSize::unknown());
+ return AA.alias(ArgLoc, Loc) == MustAlias;
+ }
default:
return false;
}
@@ -376,9 +378,10 @@ static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
- return isa<LoadInst>(I) && (I->hasMetadata(LLVMContext::MD_invariant_load) ||
- AA.pointsToConstantMemory(MemoryLocation(
- cast<LoadInst>(I)->getPointerOperand())));
+ if (auto *LI = dyn_cast<LoadInst>(I))
+ return I->hasMetadata(LLVMContext::MD_invariant_load) ||
+ AA.pointsToConstantMemory(MemoryLocation::get(LI));
+ return false;
}
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
diff --git a/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp b/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
index 2cb43d768230..7c28b876597d 100644
--- a/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
@@ -57,7 +57,9 @@ AliasResult ObjCARCAAResult::alias(const MemoryLocation &LocA,
const Value *UA = GetUnderlyingObjCPtr(SA);
const Value *UB = GetUnderlyingObjCPtr(SB);
if (UA != SA || UB != SB) {
- Result = AAResultBase::alias(MemoryLocation(UA), MemoryLocation(UB), AAQI);
+ Result = AAResultBase::alias(
+ MemoryLocation(UA, LocationSize::unknown()),
+ MemoryLocation(UB, LocationSize::unknown()), AAQI);
// We can't use MustAlias or PartialAlias results here because
// GetUnderlyingObjCPtr may return an offsetted pointer value.
if (Result == NoAlias)
@@ -85,8 +87,8 @@ bool ObjCARCAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
// ObjC-specific no-ops, and try making an imprecise alias query.
const Value *U = GetUnderlyingObjCPtr(S);
if (U != S)
- return AAResultBase::pointsToConstantMemory(MemoryLocation(U), AAQI,
- OrLocal);
+ return AAResultBase::pointsToConstantMemory(
+ MemoryLocation(U, LocationSize::unknown()), AAQI, OrLocal);
// If that failed, fail. We don't need to chain here, since that's covered
// by the earlier precise query.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 375c3e9d5304..f815da1cbfe4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4353,7 +4353,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
// Do not serialize masked loads of constant memory with anything.
MemoryLocation ML;
if (VT.isScalableVector())
- ML = MemoryLocation(PtrOperand);
+ ML = MemoryLocation(PtrOperand, LocationSize::unknown());
else
ML = MemoryLocation(PtrOperand, LocationSize::precise(
DAG.getDataLayout().getTypeStoreSize(I.getType())),
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
index 45f515c5115e..cf262c2a5358 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -108,8 +108,8 @@ bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * Load) {
for (auto &BB : Checklist) {
BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
BasicBlock::iterator(Load) : BB->end();
- auto Q = MDR->getPointerDependencyFrom(MemoryLocation(Ptr), true,
- StartIt, BB, Load);
+ auto Q = MDR->getPointerDependencyFrom(
+ MemoryLocation(Ptr, LocationSize::unknown()), true, StartIt, BB, Load);
if (Q.isClobber() || Q.isUnknown())
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
index 9c3d96de6d68..17e2ac864a7b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURewriteOutArguments.cpp
@@ -303,8 +303,9 @@ bool AMDGPURewriteOutArguments::runOnFunction(Function &F) {
for (ReturnInst *RI : Returns) {
BasicBlock *BB = RI->getParent();
- MemDepResult Q = MDA->getPointerDependencyFrom(MemoryLocation(OutArg),
- true, BB->end(), BB, RI);
+ MemDepResult Q = MDA->getPointerDependencyFrom(
+ MemoryLocation(OutArg, LocationSize::unknown()), true, BB->end(),
+ BB, RI);
StoreInst *SI = nullptr;
if (Q.isDef())
SI = dyn_cast<StoreInst>(Q.getInst());
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index bc561ae0065c..a1050ee63451 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -275,7 +275,7 @@ static MemoryLocation getLocForWrite(Instruction *Inst,
default:
return MemoryLocation(); // Unhandled intrinsic.
case Intrinsic::init_trampoline:
- return MemoryLocation(II->getArgOperand(0));
+ return MemoryLocation(II->getArgOperand(0), LocationSize::unknown());
case Intrinsic::masked_store:
return MemoryLocation::getForArgument(II, 1, TLI);
case Intrinsic::lifetime_end: {
@@ -287,7 +287,7 @@ static MemoryLocation getLocForWrite(Instruction *Inst,
if (auto *CB = dyn_cast<CallBase>(Inst))
// All the supported TLI functions so far happen to have dest as their
// first argument.
- return MemoryLocation(CB->getArgOperand(0));
+ return MemoryLocation(CB->getArgOperand(0), LocationSize::unknown());
return MemoryLocation();
}
@@ -828,7 +828,8 @@ static bool handleFree(CallInst *F, AliasAnalysis *AA,
MapVector<Instruction *, bool> &ThrowableInst) {
bool MadeChange = false;
- MemoryLocation Loc = MemoryLocation(F->getOperand(0));
+ MemoryLocation Loc = MemoryLocation(F->getOperand(0),
+ LocationSize::unknown());
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
@@ -1725,14 +1726,15 @@ struct DSEState {
case LibFunc_strncpy:
case LibFunc_strcat:
case LibFunc_strncat:
- return {MemoryLocation(CB->getArgOperand(0))};
+ return {MemoryLocation(CB->getArgOperand(0),
+ LocationSize::unknown())};
default:
break;
}
}
switch (CB->getIntrinsicID()) {
case Intrinsic::init_trampoline:
- return {MemoryLocation(CB->getArgOperand(0))};
+ return {MemoryLocation(CB->getArgOperand(0), LocationSize::unknown())};
case Intrinsic::masked_store:
return {MemoryLocation::getForArgument(CB, 1, TLI)};
default:
@@ -1827,7 +1829,9 @@ struct DSEState {
if (auto *CB = dyn_cast<CallBase>(I)) {
if (isFreeCall(I, &TLI))
- return {std::make_pair(MemoryLocation(CB->getArgOperand(0)), true)};
+ return {std::make_pair(MemoryLocation(CB->getArgOperand(0),
+ LocationSize::unknown()),
+ true)};
}
return None;