[llvm] 89051eb - [NFC] GetUnderlyingObject -> getUnderlyingObject
Vitaly Buka via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 30 21:08:36 PDT 2020
Author: Vitaly Buka
Date: 2020-07-30T21:08:24-07:00
New Revision: 89051ebacea72733ff7088cf09a760b1be707acf
URL: https://github.com/llvm/llvm-project/commit/89051ebacea72733ff7088cf09a760b1be707acf
DIFF: https://github.com/llvm/llvm-project/commit/89051ebacea72733ff7088cf09a760b1be707acf.diff
LOG: [NFC] GetUnderlyingObject -> getUnderlyingObject
I am going to touch them in the next patch anyway
Added:
Modified:
llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
llvm/include/llvm/Analysis/ValueTracking.h
llvm/lib/Analysis/AliasAnalysis.cpp
llvm/lib/Analysis/BasicAliasAnalysis.cpp
llvm/lib/Analysis/CaptureTracking.cpp
llvm/lib/Analysis/ConstantFolding.cpp
llvm/lib/Analysis/DependenceAnalysis.cpp
llvm/lib/Analysis/GlobalsModRef.cpp
llvm/lib/Analysis/InstructionSimplify.cpp
llvm/lib/Analysis/LazyValueInfo.cpp
llvm/lib/Analysis/Lint.cpp
llvm/lib/Analysis/LoopAccessAnalysis.cpp
llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
llvm/lib/Analysis/ValueTracking.cpp
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/lib/CodeGen/MachinePipeliner.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.cpp
llvm/lib/Target/AMDGPU/AMDGPUInline.cpp
llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
llvm/lib/Target/AMDGPU/R600Instructions.td
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
llvm/lib/Transforms/IPO/AttributorAttributes.cpp
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
llvm/lib/Transforms/Scalar/LICM.cpp
llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
llvm/lib/Transforms/Utils/InlineFunction.cpp
llvm/lib/Transforms/Utils/LoopUtils.cpp
llvm/lib/Transforms/Utils/VNCoercion.cpp
llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll
llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
index 945d41c37677..bd541eea9e81 100644
--- a/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
+++ b/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -67,7 +67,7 @@ inline bool ModuleHasARC(const Module &M) {
inline const Value *GetUnderlyingObjCPtr(const Value *V,
const DataLayout &DL) {
for (;;) {
- V = GetUnderlyingObject(V, DL);
+ V = getUnderlyingObject(V, DL);
if (!IsForwarding(GetBasicARCInstKind(V)))
break;
V = cast<CallInst>(V)->getArgOperand(0);
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 92dc7c01e83b..2c1426be9734 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -368,14 +368,14 @@ class Value;
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
- Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
+ Value *getUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup = 6);
- inline const Value *GetUnderlyingObject(const Value *V, const DataLayout &DL,
+ inline const Value *getUnderlyingObject(const Value *V, const DataLayout &DL,
unsigned MaxLookup = 6) {
- return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
+ return getUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
}
- /// This method is similar to GetUnderlyingObject except that it can
+ /// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
@@ -403,12 +403,12 @@ class Value;
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object thus
/// it shouldn't look through the phi above.
- void GetUnderlyingObjects(const Value *V,
+ void getUnderlyingObjects(const Value *V,
SmallVectorImpl<const Value *> &Objects,
const DataLayout &DL, LoopInfo *LI = nullptr,
unsigned MaxLookup = 6);
- /// This is a wrapper around GetUnderlyingObjects and adds support for basic
+ /// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
SmallVectorImpl<Value *> &Objects,
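
For context on the two entry points being renamed here, the sketch below shows how a caller typically uses them after this patch. It is a minimal illustration, not code from this commit, and the helper names (baseObject, basedOnAllocasOnly) are made up:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: the single object Ptr is based on, or whatever value
// the walk had reached when it hit the MaxLookup limit (default 6).
static const Value *baseObject(const Value *Ptr, const DataLayout &DL) {
  return getUnderlyingObject(Ptr, DL, /*MaxLookup=*/6);
}

// Hypothetical helper: true if every object Ptr may be based on is an alloca.
// Unlike the singular form, getUnderlyingObjects also walks phi and select
// instructions, so it can return several candidate objects.
static bool basedOnAllocasOnly(const Value *Ptr, const DataLayout &DL) {
  SmallVector<const Value *, 4> Objs;
  getUnderlyingObjects(Ptr, Objs, DL);
  for (const Value *Obj : Objs)
    if (!isa<AllocaInst>(Obj))
      return false;
  return !Objs.empty();
}
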
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index fec2415a0e45..e77c203fc8d4 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -642,7 +642,7 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
return ModRefInfo::ModRef;
const Value *Object =
- GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
+ getUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return ModRefInfo::ModRef;
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 33f122728d2a..720ce0f688cf 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -91,7 +91,7 @@ STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
// The max limit of the search depth in DecomposeGEPExpression() and
-// GetUnderlyingObject(), both functions need to use the same search
+// getUnderlyingObject(), both functions need to use the same search
// depth otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
@@ -456,8 +456,8 @@ static unsigned getMaxPointerSize(const DataLayout &DL) {
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
-/// that GetUnderlyingObject can look through. To be able to do that
-/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
+/// that getUnderlyingObject can look through. To be able to do that
+/// getUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout not is around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
@@ -661,7 +661,7 @@ bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
- const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
+ const Value *V = getUnderlyingObject(Worklist.pop_back_val(), DL);
if (!Visited.insert(V).second) {
Visited.clear();
return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
@@ -875,7 +875,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
assert(notDifferentParent(Call, Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
- const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
+ const Value *Object = getUnderlyingObject(Loc.Ptr, DL);
// Calls marked 'tail' cannot read or write allocas from the current frame
// because the current frame might be destroyed by the time they run. However,
@@ -1309,7 +1309,7 @@ bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
-/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
+/// UnderlyingV1 is getUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
@@ -1338,7 +1338,7 @@ AliasResult BasicAAResult::aliasGEP(
assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
"DecomposeGEPExpression returned a result
diff erent from "
- "GetUnderlyingObject");
+ "getUnderlyingObject");
// If the GEP's offset relative to its base is such that the base would
// fall below the start of the object underlying V2, then the GEP and V2
@@ -1782,10 +1782,10 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
// Figure out what objects these things are pointing to if we can.
if (O1 == nullptr)
- O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
+ O1 = getUnderlyingObject(V1, DL, MaxLookupSearchDepth);
if (O2 == nullptr)
- O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
+ O2 = getUnderlyingObject(V2, DL, MaxLookupSearchDepth);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp
index 8b101e3b2cc4..6de6aaa48d4f 100644
--- a/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/llvm/lib/Analysis/CaptureTracking.cpp
@@ -273,7 +273,7 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
// The pointer is not captured if returned pointer is not captured.
// NOTE: CaptureTracking users should not assume that only functions
// marked with nocapture do not capture. This means that places like
- // GetUnderlyingObject in ValueTracking or DecomposeGEPExpression
+ // getUnderlyingObject in ValueTracking or DecomposeGEPExpression
// in BasicAA also need to know about this property.
if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call,
true)) {
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index a7458ffe4572..1b3fa199d7a0 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -718,7 +718,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
- if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
+ if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE, DL))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(Ty);
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index bcfeef7fb8ab..3a67b4518c0e 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -659,8 +659,8 @@ static AliasResult underlyingObjectsAlias(AAResults *AA,
return NoAlias;
// Check the underlying objects are the same
- const Value *AObj = GetUnderlyingObject(LocA.Ptr, DL);
- const Value *BObj = GetUnderlyingObject(LocB.Ptr, DL);
+ const Value *AObj = getUnderlyingObject(LocA.Ptr, DL);
+ const Value *BObj = getUnderlyingObject(LocB.Ptr, DL);
// If the underlying objects are the same, they must alias
if (AObj == BObj)
diff --git a/llvm/lib/Analysis/GlobalsModRef.cpp b/llvm/lib/Analysis/GlobalsModRef.cpp
index 8c8ccf04ebba..b1cdfc1896d5 100644
--- a/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -44,7 +44,7 @@ STATISTIC(NumIndirectGlobalVars, "Number of indirect global objects");
// An option to enable unsafe alias results from the GlobalsModRef analysis.
// When enabled, GlobalsModRef will provide no-alias results which in extremely
// rare cases may not be conservatively correct. In particular, in the face of
-// transforms which cause assymetry between how effective GetUnderlyingObject
+// transforms which cause assymetry between how effective getUnderlyingObject
// is for two pointers, it may produce incorrect results.
//
// These unsafe results have been returned by GMR for many years without
@@ -435,7 +435,7 @@ bool GlobalsAAResult::AnalyzeIndirectGlobalMemory(GlobalVariable *GV) {
continue;
// Check the value being stored.
- Value *Ptr = GetUnderlyingObject(SI->getOperand(0),
+ Value *Ptr = getUnderlyingObject(SI->getOperand(0),
GV->getParent()->getDataLayout());
if (!isAllocLikeFn(Ptr, &GetTLI(*SI->getFunction())))
@@ -661,12 +661,12 @@ static bool isNonEscapingGlobalNoAliasWithLoad(const GlobalValue *GV,
return false;
if (auto *LI = dyn_cast<LoadInst>(Input)) {
- Inputs.push_back(GetUnderlyingObject(LI->getPointerOperand(), DL));
+ Inputs.push_back(getUnderlyingObject(LI->getPointerOperand(), DL));
continue;
}
if (auto *SI = dyn_cast<SelectInst>(Input)) {
- const Value *LHS = GetUnderlyingObject(SI->getTrueValue(), DL);
- const Value *RHS = GetUnderlyingObject(SI->getFalseValue(), DL);
+ const Value *LHS = getUnderlyingObject(SI->getTrueValue(), DL);
+ const Value *RHS = getUnderlyingObject(SI->getFalseValue(), DL);
if (Visited.insert(LHS).second)
Inputs.push_back(LHS);
if (Visited.insert(RHS).second)
@@ -675,7 +675,7 @@ static bool isNonEscapingGlobalNoAliasWithLoad(const GlobalValue *GV,
}
if (auto *PN = dyn_cast<PHINode>(Input)) {
for (const Value *Op : PN->incoming_values()) {
- Op = GetUnderlyingObject(Op, DL);
+ Op = getUnderlyingObject(Op, DL);
if (Visited.insert(Op).second)
Inputs.push_back(Op);
}
@@ -774,7 +774,7 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
if (auto *LI = dyn_cast<LoadInst>(Input)) {
// A pointer loaded from a global would have been captured, and we know
// that the global is non-escaping, so no alias.
- const Value *Ptr = GetUnderlyingObject(LI->getPointerOperand(), DL);
+ const Value *Ptr = getUnderlyingObject(LI->getPointerOperand(), DL);
if (isNonEscapingGlobalNoAliasWithLoad(GV, Ptr, Depth, DL))
// The load does not alias with GV.
continue;
@@ -782,8 +782,8 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
return false;
}
if (auto *SI = dyn_cast<SelectInst>(Input)) {
- const Value *LHS = GetUnderlyingObject(SI->getTrueValue(), DL);
- const Value *RHS = GetUnderlyingObject(SI->getFalseValue(), DL);
+ const Value *LHS = getUnderlyingObject(SI->getTrueValue(), DL);
+ const Value *RHS = getUnderlyingObject(SI->getFalseValue(), DL);
if (Visited.insert(LHS).second)
Inputs.push_back(LHS);
if (Visited.insert(RHS).second)
@@ -792,7 +792,7 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
}
if (auto *PN = dyn_cast<PHINode>(Input)) {
for (const Value *Op : PN->incoming_values()) {
- Op = GetUnderlyingObject(Op, DL);
+ Op = getUnderlyingObject(Op, DL);
if (Visited.insert(Op).second)
Inputs.push_back(Op);
}
@@ -827,8 +827,8 @@ AliasResult GlobalsAAResult::alias(const MemoryLocation &LocA,
const MemoryLocation &LocB,
AAQueryInfo &AAQI) {
// Get the base object these pointers point to.
- const Value *UV1 = GetUnderlyingObject(LocA.Ptr, DL);
- const Value *UV2 = GetUnderlyingObject(LocB.Ptr, DL);
+ const Value *UV1 = getUnderlyingObject(LocA.Ptr, DL);
+ const Value *UV2 = getUnderlyingObject(LocB.Ptr, DL);
// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
@@ -915,7 +915,7 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(const CallBase *Call,
// is based on GV, return the conservative result.
for (auto &A : Call->args()) {
SmallVector<const Value*, 4> Objects;
- GetUnderlyingObjects(A, Objects, DL);
+ getUnderlyingObjects(A, Objects, DL);
// All objects must be identified.
if (!all_of(Objects, isIdentifiedObject) &&
@@ -942,7 +942,7 @@ ModRefInfo GlobalsAAResult::getModRefInfo(const CallBase *Call,
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
if (const GlobalValue *GV =
- dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL)))
+ dyn_cast<GlobalValue>(getUnderlyingObject(Loc.Ptr, DL)))
// If GV is internal to this IR and there is no function with local linkage
// that has had their address taken, keep looking for a tighter ModRefInfo.
if (GV->hasLocalLinkage() && !UnknownFunctionsWithLocalLinkage)
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 178b5bed2006..bf55bedd801e 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2524,8 +2524,8 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
// memory within the lifetime of the current function (allocas, byval
// arguments, globals), then determine the comparison result here.
SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
- GetUnderlyingObjects(LHS, LHSUObjs, DL);
- GetUnderlyingObjects(RHS, RHSUObjs, DL);
+ getUnderlyingObjects(LHS, LHSUObjs, DL);
+ getUnderlyingObjects(RHS, RHSUObjs, DL);
// Is the set of underlying objects all noalias calls?
auto IsNAC = [](ArrayRef<const Value *> Objects) {
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index d326a442051a..5011fc8782fe 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -606,12 +606,12 @@ Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueImpl(
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(L->getPointerOperand(),
+ getUnderlyingObject(L->getPointerOperand(),
L->getModule()->getDataLayout()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(S->getPointerOperand(),
+ getUnderlyingObject(S->getPointerOperand(),
S->getModule()->getDataLayout()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
@@ -622,12 +622,12 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
- if (GetUnderlyingObject(MI->getRawDest(),
+ if (getUnderlyingObject(MI->getRawDest(),
MI->getModule()->getDataLayout()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
- if (GetUnderlyingObject(MTI->getRawSource(),
+ if (getUnderlyingObject(MTI->getRawSource(),
MTI->getModule()->getDataLayout()) == Ptr)
return true;
}
@@ -642,10 +642,10 @@ static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) {
assert(Val->getType()->isPointerTy());
const DataLayout &DL = BB->getModule()->getDataLayout();
- Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
- // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
+ Value *UnderlyingVal = getUnderlyingObject(Val, DL);
+ // If 'getUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
- if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1))
+ if (UnderlyingVal == getUnderlyingObject(UnderlyingVal, DL, 1))
for (Instruction &I : *BB)
if (InstructionDereferencesPointer(&I, UnderlyingVal))
return true;
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 38017530c322..a03f4257e97f 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -673,7 +673,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// TODO: Look through eliminable cast pairs.
// TODO: Look through calls with unique return values.
// TODO: Look through vector insert/extract/shuffle.
- V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
+ V = OffsetOk ? getUnderlyingObject(V, *DL) : V->stripPointerCasts();
if (LoadInst *L = dyn_cast<LoadInst>(V)) {
BasicBlock::iterator BBI = L->getIterator();
BasicBlock *BB = L->getParent();
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index f409cd322146..e678b313c0fd 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -938,7 +938,7 @@ void AccessAnalysis::processMemAccesses() {
typedef SmallVector<const Value *, 16> ValueVector;
ValueVector TempObjects;
- GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
+ getUnderlyingObjects(Ptr, TempObjects, DL, LI);
LLVM_DEBUG(dbgs()
<< "Underlying objects for pointer " << *Ptr << "\n");
for (const Value *UnderlyingObj : TempObjects) {
@@ -1142,7 +1142,7 @@ bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
// first pointer in the array.
Value *Ptr0 = VL[0];
const SCEV *Scev0 = SE.getSCEV(Ptr0);
- Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
+ Value *Obj0 = getUnderlyingObject(Ptr0, DL);
llvm::SmallSet<int64_t, 4> Offsets;
for (auto *Ptr : VL) {
@@ -1153,7 +1153,7 @@ bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
return false;
// If a pointer refers to a different underlying object, bail - the
// pointers are by definition incomparable.
- Value *CurrObj = GetUnderlyingObject(Ptr, DL);
+ Value *CurrObj = getUnderlyingObject(Ptr, DL);
if (CurrObj != Obj0)
return false;
@@ -1950,7 +1950,7 @@ void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
TheLoop, AA, LI, DependentAccesses, *PSE);
- // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
+ // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once
// for read and once for write, it will only appear once (on the write
// list). This is okay, since we are going to check for conflicts between
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 566eba5c54af..645eb3d51371 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -576,7 +576,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// looking for a clobber in many cases; that's an alias property and is
// handled by BasicAA.
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
- const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
+ const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr, DL);
if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
}
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 9dfe8fdcb1a5..54a2796ee2f6 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -4160,7 +4160,7 @@ static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
return true;
}
-Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
+Value *llvm::getUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup) {
if (!V->getType()->isPointerTy())
return V;
@@ -4206,7 +4206,7 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
return V;
}
-void llvm::GetUnderlyingObjects(const Value *V,
+void llvm::getUnderlyingObjects(const Value *V,
SmallVectorImpl<const Value *> &Objects,
const DataLayout &DL, LoopInfo *LI,
unsigned MaxLookup) {
@@ -4215,7 +4215,7 @@ void llvm::GetUnderlyingObjects(const Value *V,
Worklist.push_back(V);
do {
const Value *P = Worklist.pop_back_val();
- P = GetUnderlyingObject(P, DL, MaxLookup);
+ P = getUnderlyingObject(P, DL, MaxLookup);
if (!Visited.insert(P).second)
continue;
@@ -4276,9 +4276,9 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
} while (true);
}
-/// This is a wrapper around GetUnderlyingObjects and adds support for basic
+/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
-/// It returns false if unidentified object is found in GetUnderlyingObjects.
+/// It returns false if unidentified object is found in getUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
SmallVectorImpl<Value *> &Objects,
const DataLayout &DL) {
@@ -4288,7 +4288,7 @@ bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
V = Working.pop_back_val();
SmallVector<const Value *, 4> Objs;
- GetUnderlyingObjects(V, Objs, DL);
+ getUnderlyingObjects(V, Objs, DL);
for (const Value *V : Objs) {
if (!Visited.insert(V).second)
@@ -4301,7 +4301,7 @@ bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
continue;
}
}
- // If GetUnderlyingObjects fails to find an identifiable object,
+ // If getUnderlyingObjects fails to find an identifiable object,
// getUnderlyingObjectsForCodeGen also fails for safety.
if (!isIdentifiedObject(V)) {
Objects.clear();
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 264af2b562b3..61a1efc1295f 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1387,7 +1387,7 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
// Get the underlying objects for the location passed on the lifetime
// marker.
SmallVector<const Value *, 4> Allocas;
- GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
+ getUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
// Iterate over each underlying object, creating lifetime markers for each
// static alloca. Quit if we find a non-static alloca.
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index ef4b02ca9e3e..908f4e45618a 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -712,7 +712,7 @@ static void getUnderlyingObjects(const MachineInstr *MI,
MachineMemOperand *MM = *MI->memoperands_begin();
if (!MM->getValue())
return;
- GetUnderlyingObjects(MM->getValue(), Objs, DL);
+ getUnderlyingObjects(MM->getValue(), Objs, DL);
for (const Value *V : Objs) {
if (!isIdentifiedObject(V)) {
Objs.clear();
@@ -736,7 +736,7 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
PendingLoads.clear();
else if (MI.mayLoad()) {
SmallVector<const Value *, 4> Objs;
- getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
+ ::getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
if (Objs.empty())
Objs.push_back(UnknownValue);
for (auto V : Objs) {
@@ -745,7 +745,7 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
}
} else if (MI.mayStore()) {
SmallVector<const Value *, 4> Objs;
- getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
+ ::getUnderlyingObjects(&MI, Objs, MF.getDataLayout());
if (Objs.empty())
Objs.push_back(UnknownValue);
for (auto V : Objs) {
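
A note on the '::' qualifiers added above: MachinePipeliner.cpp also defines a file-local static helper named getUnderlyingObjects (taking a MachineInstr*), and once the ValueTracking function shares that spelling, unqualified lookup from code inside namespace llvm presumably finds llvm::getUnderlyingObjects first and never reaches the file-scope helper. The stand-alone sketch below reproduces that lookup behaviour with hypothetical stand-in names; it is not LLVM code:

// Stand-in for the ValueTracking declaration.
namespace llvm {
struct Value;
void getUnderlyingObjects(const Value *V);
} // namespace llvm

struct MachineInstr; // stand-in type, only used through a pointer

// File-local helper at global scope, as in MachinePipeliner.cpp.
static void getUnderlyingObjects(const MachineInstr *MI) { (void)MI; }

namespace llvm {
struct SwingSchedulerDAGSketch {
  void addLoopCarriedDependences(const MachineInstr *MI) {
    // getUnderlyingObjects(MI); // error: lookup stops at llvm::getUnderlyingObjects
    ::getUnderlyingObjects(MI);  // OK: explicitly names the file-scope helper
  }
};
} // namespace llvm

int main() {
  llvm::SwingSchedulerDAGSketch DAG;
  DAG.addLoopCarriedDependences(nullptr);
  return 0;
}
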
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index d231e61a103b..c0ba4c55995d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6636,7 +6636,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
Value *const ObjectPtr = I.getArgOperand(1);
SmallVector<const Value *, 4> Allocas;
- GetUnderlyingObjects(ObjectPtr, Allocas, *DL);
+ getUnderlyingObjects(ObjectPtr, Allocas, *DL);
for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
E = Allocas.end(); Object != E; ++Object) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.cpp
index bb2aba044974..e0159bb9b113 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAliasAnalysis.cpp
@@ -96,7 +96,7 @@ bool AMDGPUAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
return true;
- const Value *Base = GetUnderlyingObject(Loc.Ptr, DL);
+ const Value *Base = getUnderlyingObject(Loc.Ptr, DL);
AS = Base->getType()->getPointerAddressSpace();
if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp
index 3b5d91133a2f..d859e798a74c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInline.cpp
@@ -134,7 +134,7 @@ unsigned AMDGPUInliner::getInlineThreshold(CallBase &CB) const {
Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
continue;
- PtrArg = GetUnderlyingObject(PtrArg, DL);
+ PtrArg = getUnderlyingObject(PtrArg, DL);
if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
continue;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index c9e2bd768cce..965da223dac4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -605,7 +605,7 @@ bool AMDGPUPromoteAlloca::binaryOpIsDerivedFromSameAlloca(Value *BaseAlloca,
if (isa<ConstantPointerNull>(OtherOp))
return true;
- Value *OtherObj = GetUnderlyingObject(OtherOp, *DL);
+ Value *OtherObj = getUnderlyingObject(OtherOp, *DL);
if (!isa<AllocaInst>(OtherObj))
return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 4ae092ce55bf..18d8ad1d3848 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -169,7 +169,7 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
const Value *Ptr = GEP->getPointerOperand();
const AllocaInst *Alloca =
- dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
+ dyn_cast<AllocaInst>(getUnderlyingObject(Ptr, DL));
if (!Alloca || !Alloca->isStaticAlloca())
continue;
Type *Ty = Alloca->getAllocatedType();
diff --git a/llvm/lib/Target/AMDGPU/R600Instructions.td b/llvm/lib/Target/AMDGPU/R600Instructions.td
index 2cc21364c439..03bfb2babf0c 100644
--- a/llvm/lib/Target/AMDGPU/R600Instructions.td
+++ b/llvm/lib/Target/AMDGPU/R600Instructions.td
@@ -353,7 +353,7 @@ class LoadVtxId1 <PatFrag load> : PatFrag <
const MemSDNode *LD = cast<MemSDNode>(N);
return LD->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
(LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
- !isa<GlobalValue>(GetUnderlyingObject(
+ !isa<GlobalValue>(getUnderlyingObject(
LD->getMemOperand()->getValue(), CurDAG->getDataLayout())));
}]>;
@@ -365,7 +365,7 @@ class LoadVtxId2 <PatFrag load> : PatFrag <
(ops node:$ptr), (load node:$ptr), [{
const MemSDNode *LD = cast<MemSDNode>(N);
return LD->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
- isa<GlobalValue>(GetUnderlyingObject(
+ isa<GlobalValue>(getUnderlyingObject(
LD->getMemOperand()->getValue(), CurDAG->getDataLayout()));
}]>;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 745a52608556..17d1322d7e59 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -461,8 +461,8 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
return false;
const MachineFunction &MF = *MI1.getParent()->getParent();
const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
- Base1 = GetUnderlyingObject(Base1, DL);
- Base2 = GetUnderlyingObject(Base2, DL);
+ Base1 = getUnderlyingObject(Base1, DL);
+ Base2 = getUnderlyingObject(Base2, DL);
if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
return false;
diff --git a/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
index 155d19ba6959..0fcbeba6db5c 100644
--- a/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
+++ b/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp
@@ -542,7 +542,7 @@ getUnderlyingObjects(const MachineInstr &MI,
if (const Value *V = MMO.getValue()) {
SmallVector<const Value *, 4> Objs;
- GetUnderlyingObjects(V, Objs, DL);
+ ::getUnderlyingObjects(V, Objs, DL);
for (const Value *UValue : Objs) {
if (!isIdentifiedObject(V))
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 4296eca6a8df..4cdb8393ebb2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -700,12 +700,12 @@ static bool canLowerToLDG(MemSDNode *N, const NVPTXSubtarget &Subtarget,
bool IsKernelFn = isKernelFunction(F->getFunction());
- // We use GetUnderlyingObjects() here instead of GetUnderlyingObject() mainly
+ // We use getUnderlyingObjects() here instead of getUnderlyingObject() mainly
// because the former looks through phi nodes while the latter does not. We
// need to look through phi nodes to handle pointer induction variables.
SmallVector<const Value *, 8> Objs;
- GetUnderlyingObjects(N->getMemOperand()->getValue(),
- Objs, F->getDataLayout());
+ getUnderlyingObjects(N->getMemOperand()->getValue(), Objs,
+ F->getDataLayout());
return all_of(Objs, [&](const Value *V) {
if (auto *A = dyn_cast<const Argument>(V))
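
The comment in this hunk is the one place in the patch where the singular/plural distinction is spelled out: only getUnderlyingObjects walks phi (and select) nodes, which is what lets canLowerToLDG handle pointer induction variables. A minimal sketch in the same spirit follows; the helper name is an assumption, not code from this commit:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Hypothetical check: a pointer that is a loop phi such as
//   %p = phi i32* [ %arg, %entry ], [ %p.next, %loop ]
// still qualifies, because getUnderlyingObjects collects the objects behind
// every incoming value instead of stopping at the phi itself.
static bool basedOnlyOnArguments(const Value *Ptr, const DataLayout &DL) {
  SmallVector<const Value *, 8> Objs;
  getUnderlyingObjects(Ptr, Objs, DL);
  return !Objs.empty() &&
         all_of(Objs, [](const Value *V) { return isa<Argument>(V); });
}
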
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
index e60b5eeacdae..453072818a53 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
@@ -214,7 +214,7 @@ bool NVPTXLowerArgs::runOnKernelFunction(Function &F) {
for (auto &I : B) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->getType()->isPointerTy()) {
- Value *UO = GetUnderlyingObject(LI->getPointerOperand(),
+ Value *UO = getUnderlyingObject(LI->getPointerOperand(),
F.getParent()->getDataLayout());
if (Argument *Arg = dyn_cast<Argument>(UO)) {
if (Arg->hasByValAttr()) {
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 142a4e48217c..24b6bd4818dc 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -5422,7 +5422,7 @@ struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
/// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
Optional<Type *> identifyPrivatizableType(Attributor &A) override {
Value *Obj =
- GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
+ getUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
if (!Obj) {
LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
return nullptr;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 2b8732fdb80d..4c75bc97de66 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -5643,10 +5643,10 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
// Try to optimize equality comparisons against alloca-based pointers.
if (Op0->getType()->isPointerTy() && I.isEquality()) {
assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
- if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
+ if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0, DL)))
if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
return New;
- if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
+ if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1, DL)))
if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
return New;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index e533198bd489..c14a330542c9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2849,8 +2849,9 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
return replaceOperand(SI, 1, TrueSI->getTrueValue());
}
// select(C0, select(C1, a, b), b) -> select(C0&C1, a, b)
- // We choose this as normal form to enable folding on the And and shortening
- // paths for the values (this helps GetUnderlyingObjects() for example).
+ // We choose this as normal form to enable folding on the And and
+ // shortening paths for the values (this helps getUnderlyingObjects() for
+ // example).
if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
Value *And = Builder.CreateAnd(CondVal, TrueSI->getCondition());
replaceOperand(SI, 0, And);
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 053b9aa812bf..0b542b638aee 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1556,7 +1556,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptGlobals) {
// If initialization order checking is disabled, a simple access to a
// dynamically initialized global is always valid.
- GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
+ GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr, DL));
if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
NumOptimizedAccessesToGlobalVar++;
@@ -1566,7 +1566,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
- if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+ if (isa<AllocaInst>(getUnderlyingObject(Addr, DL)) &&
isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
NumOptimizedAccessesToStackVar++;
return;
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index feb1abb18777..b1f5de810110 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1246,7 +1246,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
const llvm::Align ShadowAlign(Align * DFS.ShadowWidthBytes);
SmallVector<const Value *, 2> Objs;
- GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
+ getUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;
for (const Value *Obj : Objs) {
if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 8ce12c514f0b..ca1535b3f9fa 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -478,7 +478,7 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
}
}
- if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
+ if (isa<AllocaInst>(getUnderlyingObject(Addr, DL)) &&
!PointerMayBeCaptured(Addr, true, true)) {
// The variable is addressable but not captured, so it cannot be
// referenced from a different thread and participate in a data race
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 258fd5b9454f..ad80ef15362e 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -415,8 +415,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval/inalloca argument). If so, then it clearly
// overwrites any other store to the same object.
- const Value *UO1 = GetUnderlyingObject(P1, DL),
- *UO2 = GetUnderlyingObject(P2, DL);
+ const Value *UO1 = getUnderlyingObject(P1, DL),
+ *UO2 = getUnderlyingObject(P2, DL);
// If we can't resolve the same pointers to the same object, then we can't
// analyze them at all.
@@ -755,7 +755,7 @@ static bool handleFree(CallInst *F, AliasAnalysis *AA,
break;
Value *DepPointer =
- GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
+ getUnderlyingObject(getStoredPointerOperand(Dependency), DL);
// Check for aliasing.
if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
@@ -795,7 +795,7 @@ static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
const DataLayout &DL, AliasAnalysis *AA,
const TargetLibraryInfo *TLI,
const Function *F) {
- const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
+ const Value *UnderlyingPointer = getUnderlyingObject(LoadedLoc.Ptr, DL);
// A constant can't be in the dead pointer set.
if (isa<Constant>(UnderlyingPointer))
@@ -861,7 +861,7 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<const Value *, 4> Pointers;
- GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
+ getUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
// Stores to stack values are valid candidates for removal.
bool AllDead = true;
@@ -1134,7 +1134,7 @@ static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
Instruction *UnderlyingPointer =
- dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));
+ dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand(), DL));
if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA, DL, DT)) {
@@ -1289,7 +1289,7 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
// to it is dead along the unwind edge. Otherwise, we need to preserve
// the store.
if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
- const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
+ const Value *Underlying = getUnderlyingObject(DepLoc.Ptr, DL);
bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
if (!IsStoreDeadOnUnwind) {
// We're looking for a call to an allocation function
@@ -1715,7 +1715,7 @@ struct DSEState {
// object can be considered terminated.
if (MaybeTermLoc->second) {
DataLayout DL = MaybeTerm->getParent()->getModule()->getDataLayout();
- DefLoc = MemoryLocation(GetUnderlyingObject(DefLoc.Ptr, DL));
+ DefLoc = MemoryLocation(getUnderlyingObject(DefLoc.Ptr, DL));
}
return AA.isMustAlias(MaybeTermLoc->first, DefLoc);
}
@@ -2047,7 +2047,7 @@ struct DSEState {
Instruction *DefI = Def->getMemoryInst();
// See through pointer-to-pointer bitcasts
SmallVector<const Value *, 4> Pointers;
- GetUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers, DL);
+ getUnderlyingObjects(getLocForWriteEx(DefI)->Ptr, Pointers, DL);
LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
"of the function\n");
@@ -2130,7 +2130,7 @@ bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
}
MemoryLocation SILoc = *MaybeSILoc;
assert(SILoc.Ptr && "SILoc should not be null");
- const Value *SILocUnd = GetUnderlyingObject(SILoc.Ptr, DL);
+ const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr, DL);
// Check if the store is a no-op.
if (isRemovable(SI) && State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
@@ -2231,7 +2231,7 @@ bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
MemoryLocation NILoc = *State.getLocForWriteEx(NI);
if (State.isMemTerminatorInst(SI)) {
- const Value *NIUnd = GetUnderlyingObject(NILoc.Ptr, DL);
+ const Value *NIUnd = getUnderlyingObject(NILoc.Ptr, DL);
if (!SILocUnd || SILocUnd != NIUnd)
continue;
LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 1a22edaf8726..043d3683aa2e 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1909,7 +1909,7 @@ bool llvm::promoteLoopAccessesToScalars(
// we have to prove that the store is dead along the unwind edge. We do
// this by proving that the caller can't have a reference to the object
// after return and thus can't possibly load from the object.
- Value *Object = GetUnderlyingObject(SomePtr, MDL);
+ Value *Object = getUnderlyingObject(SomePtr, MDL);
if (!isKnownNonEscaping(Object, TLI))
return false;
// Subtlety: Alloca's aren't visible to callers, but *are* potentially
@@ -2041,7 +2041,7 @@ bool llvm::promoteLoopAccessesToScalars(
if (IsKnownThreadLocalObject)
SafeToInsertStore = true;
else {
- Value *Object = GetUnderlyingObject(SomePtr, MDL);
+ Value *Object = getUnderlyingObject(SomePtr, MDL);
SafeToInsertStore =
(isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
!PointerMayBeCaptured(Object, true, true);
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index fc980cf3689f..99d3519eff15 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -564,12 +564,12 @@ void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
break;
case LegalStoreKind::Memset: {
// Find the base pointer.
- Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
+ Value *Ptr = getUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemset[Ptr].push_back(SI);
} break;
case LegalStoreKind::MemsetPattern: {
// Find the base pointer.
- Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
+ Value *Ptr = getUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemsetPattern[Ptr].push_back(SI);
} break;
case LegalStoreKind::Memcpy:
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index a9332356b371..e0cd0253f9bf 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -1562,7 +1562,7 @@ class LowerMatrixIntrinsics {
if (Value *Ptr = getPointerOperand(V))
return getUnderlyingObjectThroughLoads(Ptr);
else if (V->getType()->isPointerTy())
- return GetUnderlyingObject(V, DL);
+ return getUnderlyingObject(V, DL);
return V;
}
diff --git a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
index 7ff73fcdada7..762158f7e19e 100644
--- a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
+++ b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
@@ -69,7 +69,7 @@ RetainedKnowledge canonicalizedKnowledge(RetainedKnowledge RK, Module *M) {
default:
return RK;
case Attribute::NonNull:
- RK.WasOn = GetUnderlyingObject(RK.WasOn, M->getDataLayout());
+ RK.WasOn = getUnderlyingObject(RK.WasOn, M->getDataLayout());
return RK;
case Attribute::Alignment: {
Value *V = RK.WasOn->stripInBoundsOffsets([&](const Value *Strip) {
@@ -145,7 +145,7 @@ struct AssumeBuilderState {
if (!RK.WasOn)
return true;
if (RK.WasOn->getType()->isPointerTy()) {
- Value *UnderlyingPtr = GetUnderlyingObject(RK.WasOn, M->getDataLayout());
+ Value *UnderlyingPtr = getUnderlyingObject(RK.WasOn, M->getDataLayout());
if (isa<AllocaInst>(UnderlyingPtr) || isa<GlobalValue>(UnderlyingPtr))
return false;
}
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 5c9cb1245d01..090ba9daa678 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1037,7 +1037,7 @@ static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
SmallSetVector<const Argument *, 4> NAPtrArgs;
for (const Value *V : PtrArgs) {
SmallVector<const Value *, 4> Objects;
- GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
+ getUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
for (const Value *O : Objects)
ObjSet.insert(O);
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
index 43363736684e..d7cd9b19b8d5 100644
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -1145,7 +1145,7 @@ static bool isValidRewrite(ScalarEvolution *SE, Value *FromVal, Value *ToVal) {
// producing an expression involving multiple pointers. Until then, we must
// bail out here.
//
- // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject
+ // Retrieve the pointer operand of the GEP. Don't use getUnderlyingObject
// because it understands lcssa phis while SCEV does not.
Value *FromPtr = FromVal;
Value *ToPtr = ToVal;
@@ -1162,7 +1162,7 @@ static bool isValidRewrite(ScalarEvolution *SE, Value *FromVal, Value *ToVal) {
// SCEV may have rewritten an expression that produces the GEP's pointer
// operand. That's ok as long as the pointer operand has the same base
- // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the
+ // pointer. Unlike getUnderlyingObject(), getPointerBase() will find the
// base of a recurrence. This handles the case in which SCEV expansion
// converts a pointer type recurrence into a nonrecurrent pointer base
// indexed by an integer recurrence.
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
index 6ff08cd28712..11b079d3c050 100644
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -393,7 +393,7 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
if (!Src)
return -1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(Src, DL));
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return -1;
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 9b81afbb4b6c..0b10e50bd1f7 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -763,7 +763,7 @@ Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
}
static ChainID getChainID(const Value *Ptr, const DataLayout &DL) {
- const Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
+ const Value *ObjPtr = getUnderlyingObject(Ptr, DL);
if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
// The select's themselves are distinct instructions even if they share the
// same condition and evaluate to consecutive pointers for true and false
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index dd6ff20b1def..08c0021a4126 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -5912,7 +5912,7 @@ void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
continue;
if (!isValidElementType(SI->getValueOperand()->getType()))
continue;
- Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
+ Stores[getUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
}
// Ignore getelementptr instructions that have more than one index, a
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll b/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
index 476cf3208c84..8fbf47304e80 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/nullptr.ll
@@ -2,9 +2,9 @@
; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output < %s 2>&1 | FileCheck %s
; Test that the loop accesses are proven safe in this case.
-; The analyzer uses to be confused by the "diamond" because GetUnderlyingObjects
+; The analyzer uses to be confused by the "diamond" because getUnderlyingObjects
; is saying that the two pointers can both points to null. The loop analyzer
-; needs to ignore null in the results returned by GetUnderlyingObjects.
+; needs to ignore null in the results returned by getUnderlyingObjects.
; CHECK: Memory dependences are safe with run-time checks
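
As background for the comment in this test: a client of getUnderlyingObjects has to tolerate null showing up among the returned objects. The sketch below shows one way to filter such results; the helper name is made up and the real check inside LoopAccessAnalysis may differ in detail:

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Hypothetical helper: collect the underlying objects of Ptr but skip null
// pointers in address spaces where null is not a real object, so a "diamond"
// of phis/selects reaching null does not create a bogus dependence.
static void collectNonNullObjects(const Value *Ptr, const Function &F,
                                  const DataLayout &DL, LoopInfo *LI,
                                  SmallVectorImpl<const Value *> &Out) {
  SmallVector<const Value *, 16> Objs;
  getUnderlyingObjects(Ptr, Objs, DL, LI);
  for (const Value *Obj : Objs) {
    if (isa<ConstantPointerNull>(Obj) &&
        !NullPointerIsDefined(&F, Obj->getType()->getPointerAddressSpace()))
      continue;
    Out.push_back(Obj);
  }
}
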
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll
index 2fecbcb9d7a6..bd37356b15ec 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-phi.ll
@@ -129,7 +129,7 @@ endif:
; }
; FIXME: This should be promotable. We need to use
-; GetUnderlyingObjects when looking at the icmp user.
+; getUnderlyingObjects when looking at the icmp user.
; CHECK-LABEL: @ptr_induction_var_same_alloca(
; CHECK: %alloca = alloca [64 x i32], align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
index 4c904b66f3d3..4f85482e3ae0 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/address-space-ptr-sze-gep-index-assert.ll
@@ -65,7 +65,7 @@ bb:
ret void
}
-; This should vectorize if using GetUnderlyingObject
+; This should vectorize if using getUnderlyingObject
define void @multi_as_reduction_same_size(i32 addrspace(1)* %global, i64 %idx0, i64 %idx1) #0 {
; CHECK-LABEL: @multi_as_reduction_same_size(
; CHECK-NEXT: bb:
@@ -106,7 +106,7 @@ bb:
ret void
}
-; This should vectorize if using GetUnderlyingObject
+; This should vectorize if using getUnderlyingObject
; The add is done in the same width, even though the address space size is smaller
define void @multi_as_reduction_different_sized_noncanon(i32 addrspace(3)* %lds, i64 %idx0, i64 %idx1) #0 {
; CHECK-LABEL: @multi_as_reduction_different_sized_noncanon(