[llvm] r327173 - [NFC] Consolidate six getPointerOperand() utility functions into one place
Renato Golin via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 9 13:05:59 PST 2018
Author: rengolin
Date: Fri Mar 9 13:05:58 2018
New Revision: 327173
URL: http://llvm.org/viewvc/llvm-project?rev=327173&view=rev
Log:
[NFC] Consolidate six getPointerOperand() utility functions into one place
There are six separate instances of the getPointerOperand() utility.
LoopVectorize.cpp has one of them, and I don't want to create a
seventh while moving LoopVectorizationLegality into a separate file
(the eventual objective is to move it into the Analysis tree).
See http://lists.llvm.org/pipermail/llvm-dev/2018-February/120999.html
for the llvm-dev discussion.
Closes D43323.
Patch by Hideki Saito <hideki.saito at intel.com>.
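For illustration, a minimal sketch of what a migrated call site looks
like after this patch (only getLoadStorePointerOperand() below is from
this change; the surrounding function and its names are hypothetical):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical call site: previously each file defined its own static
// getPointerOperand(); now the shared inline helper declared in
// llvm/IR/Instructions.h is used directly.
static bool accessesPointer(Instruction *I, Value *Ptr) {
  // Returns the pointer operand for loads and stores, nullptr otherwise.
  if (Value *P = getLoadStorePointerOperand(I))
    return P == Ptr;
  return false;
}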
Modified:
llvm/trunk/include/llvm/IR/Instructions.h
llvm/trunk/lib/Analysis/Delinearization.cpp
llvm/trunk/lib/Analysis/DependenceAnalysis.cpp
llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp
llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
Modified: llvm/trunk/include/llvm/IR/Instructions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/Instructions.h?rev=327173&r1=327172&r2=327173&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/Instructions.h (original)
+++ llvm/trunk/include/llvm/IR/Instructions.h Fri Mar 9 13:05:58 2018
@@ -5043,6 +5043,26 @@ public:
}
};
+/// A helper function that returns the pointer operand of a load or store
+/// instruction. Returns nullptr if not load or store.
+inline Value *getLoadStorePointerOperand(Value *V) {
+ if (auto *Load = dyn_cast<LoadInst>(V))
+ return Load->getPointerOperand();
+ if (auto *Store = dyn_cast<StoreInst>(V))
+ return Store->getPointerOperand();
+ return nullptr;
+}
+
+/// A helper function that returns the pointer operand of a load, store
+/// or GEP instruction. Returns nullptr if not load, store, or GEP.
+inline Value *getPointerOperand(Value *V) {
+ if (auto *Ptr = getLoadStorePointerOperand(V))
+ return Ptr;
+ if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
+ return Gep->getPointerOperand();
+ return nullptr;
+}
+
} // end namespace llvm
#endif // LLVM_IR_INSTRUCTIONS_H
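The two helpers added above differ only in GEP handling; a hypothetical
snippet showing the distinction (the classify() function and its
variable names are illustrative, not part of the patch):

#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

void classify(Instruction *I) {
  Value *LSPtr = getLoadStorePointerOperand(I); // loads/stores only
  Value *AnyPtr = getPointerOperand(I);         // loads, stores, and GEPs
  if (LSPtr) {
    // I is a load or store; both helpers return the same operand.
    assert(LSPtr == AnyPtr && "helpers must agree for loads and stores");
  } else if (AnyPtr) {
    // I is a GetElementPtrInst; only getPointerOperand() handles it.
  }
}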
Modified: llvm/trunk/lib/Analysis/Delinearization.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/Delinearization.cpp?rev=327173&r1=327172&r2=327173&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/Delinearization.cpp (original)
+++ llvm/trunk/lib/Analysis/Delinearization.cpp Fri Mar 9 13:05:58 2018
@@ -69,16 +69,6 @@ bool Delinearization::runOnFunction(Func
return false;
}
-static Value *getPointerOperand(Instruction &Inst) {
- if (LoadInst *Load = dyn_cast<LoadInst>(&Inst))
- return Load->getPointerOperand();
- else if (StoreInst *Store = dyn_cast<StoreInst>(&Inst))
- return Store->getPointerOperand();
- else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst))
- return Gep->getPointerOperand();
- return nullptr;
-}
-
void Delinearization::print(raw_ostream &O, const Module *) const {
O << "Delinearization on function " << F->getName() << ":\n";
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
@@ -93,7 +83,7 @@ void Delinearization::print(raw_ostream
// Delinearize the memory access as analyzed in all the surrounding loops.
// Do not analyze memory accesses outside loops.
for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
- const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
+ const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(Inst), L);
const SCEVUnknown *BasePointer =
dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFn));
Modified: llvm/trunk/lib/Analysis/DependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/DependenceAnalysis.cpp?rev=327173&r1=327172&r2=327173&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/DependenceAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/DependenceAnalysis.cpp Fri Mar 9 13:05:58 2018
@@ -643,17 +643,6 @@ bool isLoadOrStore(const Instruction *I)
}
-static
-Value *getPointerOperand(Instruction *I) {
- if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->getPointerOperand();
- if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->getPointerOperand();
- llvm_unreachable("Value is not load or store instruction");
- return nullptr;
-}
-
-
// Examines the loop nesting of the Src and Dst
// instructions and establishes their shared loops. Sets the variables
// CommonLevels, SrcLevels, and MaxLevels.
@@ -3176,8 +3165,10 @@ void DependenceInfo::updateDirection(Dep
/// for each loop level.
bool DependenceInfo::tryDelinearize(Instruction *Src, Instruction *Dst,
SmallVectorImpl<Subscript> &Pair) {
- Value *SrcPtr = getPointerOperand(Src);
- Value *DstPtr = getPointerOperand(Dst);
+ assert(isLoadOrStore(Src) && "instruction is not load or store");
+ assert(isLoadOrStore(Dst) && "instruction is not load or store");
+ Value *SrcPtr = getLoadStorePointerOperand(Src);
+ Value *DstPtr = getLoadStorePointerOperand(Dst);
Loop *SrcLoop = LI->getLoopFor(Src->getParent());
Loop *DstLoop = LI->getLoopFor(Dst->getParent());
@@ -3302,8 +3293,10 @@ DependenceInfo::depends(Instruction *Src
return make_unique<Dependence>(Src, Dst);
}
- Value *SrcPtr = getPointerOperand(Src);
- Value *DstPtr = getPointerOperand(Dst);
+ assert(isLoadOrStore(Src) && "instruction is not load or store");
+ assert(isLoadOrStore(Dst) && "instruction is not load or store");
+ Value *SrcPtr = getLoadStorePointerOperand(Src);
+ Value *DstPtr = getLoadStorePointerOperand(Dst);
switch (underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr,
SrcPtr)) {
@@ -3720,8 +3713,8 @@ const SCEV *DependenceInfo::getSplitIter
assert(Dst->mayReadFromMemory() || Dst->mayWriteToMemory());
assert(isLoadOrStore(Src));
assert(isLoadOrStore(Dst));
- Value *SrcPtr = getPointerOperand(Src);
- Value *DstPtr = getPointerOperand(Dst);
+ Value *SrcPtr = getLoadStorePointerOperand(Src);
+ Value *DstPtr = getLoadStorePointerOperand(Dst);
assert(underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr,
SrcPtr) == MustAlias);
Modified: llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp?rev=327173&r1=327172&r2=327173&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp Fri Mar 9 13:05:58 2018
@@ -1087,16 +1087,6 @@ int64_t llvm::getPtrStride(PredicatedSca
return Stride;
}
-/// Take the pointer operand from the Load/Store instruction.
-/// Returns NULL if this is not a valid Load/Store instruction.
-static Value *getPointerOperand(Value *I) {
- if (auto *LI = dyn_cast<LoadInst>(I))
- return LI->getPointerOperand();
- if (auto *SI = dyn_cast<StoreInst>(I))
- return SI->getPointerOperand();
- return nullptr;
-}
-
/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
@@ -1110,8 +1100,8 @@ static unsigned getAddressSpaceOperand(V
/// Returns true if the memory operations \p A and \p B are consecutive.
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
ScalarEvolution &SE, bool CheckType) {
- Value *PtrA = getPointerOperand(A);
- Value *PtrB = getPointerOperand(B);
+ Value *PtrA = getLoadStorePointerOperand(A);
+ Value *PtrB = getLoadStorePointerOperand(B);
unsigned ASA = getAddressSpaceOperand(A);
unsigned ASB = getAddressSpaceOperand(B);
Modified: llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp?rev=327173&r1=327172&r2=327173&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp Fri Mar 9 13:05:58 2018
@@ -532,12 +532,7 @@ private:
Value *getPointerOperand() const {
if (IsTargetMemInst) return Info.PtrVal;
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- return LI->getPointerOperand();
- } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- return SI->getPointerOperand();
- }
- return nullptr;
+ return getLoadStorePointerOperand(Inst);
}
bool mayReadFromMemory() const {
Modified: llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp?rev=327173&r1=327172&r2=327173&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp Fri Mar 9 13:05:58 2018
@@ -118,8 +118,6 @@ public:
bool run();
private:
- Value *getPointerOperand(Value *I) const;
-
GetElementPtrInst *getSourceGEP(Value *Src) const;
unsigned getPointerAddressSpace(Value *I);
@@ -271,14 +269,6 @@ bool Vectorizer::run() {
return Changed;
}
-Value *Vectorizer::getPointerOperand(Value *I) const {
- if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->getPointerOperand();
- if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->getPointerOperand();
- return nullptr;
-}
-
unsigned Vectorizer::getPointerAddressSpace(Value *I) {
if (LoadInst *L = dyn_cast<LoadInst>(I))
return L->getPointerAddressSpace();
@@ -292,7 +282,7 @@ GetElementPtrInst *Vectorizer::getSource
// and without casts.
// TODO: a stride set by the add instruction below can match the difference
// in pointee type size here. Currently it will not be vectorized.
- Value *SrcPtr = getPointerOperand(Src);
+ Value *SrcPtr = getLoadStorePointerOperand(Src);
Value *SrcBase = SrcPtr->stripPointerCasts();
if (DL.getTypeStoreSize(SrcPtr->getType()->getPointerElementType()) ==
DL.getTypeStoreSize(SrcBase->getType()->getPointerElementType()))
@@ -302,8 +292,8 @@ GetElementPtrInst *Vectorizer::getSource
// FIXME: Merge with llvm::isConsecutiveAccess
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
- Value *PtrA = getPointerOperand(A);
- Value *PtrB = getPointerOperand(B);
+ Value *PtrA = getLoadStorePointerOperand(A);
+ Value *PtrB = getLoadStorePointerOperand(B);
unsigned ASA = getPointerAddressSpace(A);
unsigned ASB = getPointerAddressSpace(B);
@@ -482,7 +472,7 @@ Vectorizer::getBoundaryInstrs(ArrayRef<I
void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
SmallVector<Instruction *, 16> Instrs;
for (Instruction *I : Chain) {
- Value *PtrOperand = getPointerOperand(I);
+ Value *PtrOperand = getLoadStorePointerOperand(I);
assert(PtrOperand && "Instruction must have a pointer operand.");
Instrs.push_back(I);
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
@@ -592,10 +582,10 @@ Vectorizer::getVectorizablePrefix(ArrayR
dbgs() << "LSV: Found alias:\n"
" Aliasing instruction and pointer:\n"
<< " " << *MemInstr << '\n'
- << " " << *getPointerOperand(MemInstr) << '\n'
+ << " " << *getLoadStorePointerOperand(MemInstr) << '\n'
<< " Aliased instruction and pointer:\n"
<< " " << *ChainInstr << '\n'
- << " " << *getPointerOperand(ChainInstr) << '\n';
+ << " " << *getLoadStorePointerOperand(ChainInstr) << '\n';
});
// Save this aliasing memory instruction as a barrier, but allow other
// instructions that precede the barrier to be vectorized with this one.
Modified: llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp?rev=327173&r1=327172&r2=327173&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp Fri Mar 9 13:05:58 2018
@@ -296,16 +296,6 @@ static Type *ToVectorTy(Type *Scalar, un
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.
-/// A helper function that returns the pointer operand of a load or store
-/// instruction.
-static Value *getPointerOperand(Value *I) {
- if (auto *LI = dyn_cast<LoadInst>(I))
- return LI->getPointerOperand();
- if (auto *SI = dyn_cast<StoreInst>(I))
- return SI->getPointerOperand();
- return nullptr;
-}
-
/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
@@ -2860,7 +2850,7 @@ void InnerLoopVectorizer::vectorizeInter
return;
const DataLayout &DL = Instr->getModule()->getDataLayout();
- Value *Ptr = getPointerOperand(Instr);
+ Value *Ptr = getLoadStorePointerOperand(Instr);
// Prepare for the vector type of the interleaved load/store.
Type *ScalarTy = getMemInstValueType(Instr);
@@ -3002,7 +2992,7 @@ void InnerLoopVectorizer::vectorizeMemor
Type *ScalarDataTy = getMemInstValueType(Instr);
Type *DataTy = VectorType::get(ScalarDataTy, VF);
- Value *Ptr = getPointerOperand(Instr);
+ Value *Ptr = getLoadStorePointerOperand(Instr);
unsigned Alignment = getMemInstAlignment(Instr);
// An alignment of 0 means target abi alignment. We need to use the scalar's
// target abi alignment in such a case.
@@ -4797,7 +4787,7 @@ bool LoopVectorizationLegality::canVecto
continue;
for (Instruction &I : *BB)
- if (auto *Ptr = getPointerOperand(&I))
+ if (auto *Ptr = getLoadStorePointerOperand(&I))
SafePointes.insert(Ptr);
}
@@ -5248,7 +5238,7 @@ void LoopVectorizationCostModel::collect
if (auto *Store = dyn_cast<StoreInst>(MemAccess))
if (Ptr == Store->getValueOperand())
return WideningDecision == CM_Scalarize;
- assert(Ptr == getPointerOperand(MemAccess) &&
+ assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
"Ptr is neither a value or pointer operand");
return WideningDecision != CM_GatherScatter;
};
@@ -5416,7 +5406,7 @@ bool LoopVectorizationCostModel::isScala
case Instruction::Store: {
if (!Legal->isMaskRequired(I))
return false;
- auto *Ptr = getPointerOperand(I);
+ auto *Ptr = getLoadStorePointerOperand(I);
auto *Ty = getMemInstValueType(I);
return isa<LoadInst>(I) ?
!(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty))
@@ -5438,7 +5428,7 @@ bool LoopVectorizationCostModel::memoryI
StoreInst *SI = dyn_cast<StoreInst>(I);
assert((LI || SI) && "Invalid memory instruction");
- auto *Ptr = getPointerOperand(I);
+ auto *Ptr = getLoadStorePointerOperand(I);
// In order to be widened, the pointer should be consecutive, first of all.
if (!Legal->isConsecutivePtr(Ptr))
@@ -5524,7 +5514,7 @@ void LoopVectorizationCostModel::collect
for (auto *BB : TheLoop->blocks())
for (auto &I : *BB) {
// If there's no pointer operand, there's nothing to do.
- auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+ auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
if (!Ptr)
continue;
@@ -5532,7 +5522,7 @@ void LoopVectorizationCostModel::collect
// pointer operand.
auto UsersAreMemAccesses =
llvm::all_of(Ptr->users(), [&](User *U) -> bool {
- return getPointerOperand(U) == Ptr;
+ return getLoadStorePointerOperand(U) == Ptr;
});
// Ensure the memory instruction will not be scalarized or used by
@@ -5572,7 +5562,8 @@ void LoopVectorizationCostModel::collect
if (llvm::all_of(OI->users(), [&](User *U) -> bool {
auto *J = cast<Instruction>(U);
return !TheLoop->contains(J) || Worklist.count(J) ||
- (OI == getPointerOperand(J) && isUniformDecision(J, VF));
+ (OI == getLoadStorePointerOperand(J) &&
+ isUniformDecision(J, VF));
})) {
Worklist.insert(OI);
DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
@@ -5583,7 +5574,7 @@ void LoopVectorizationCostModel::collect
// Returns true if Ptr is the pointer operand of a memory access instruction
// I, and I is known to not require scalarization.
auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
- return getPointerOperand(I) == Ptr && isUniformDecision(I, VF);
+ return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
};
// For an instruction to be added into Worklist above, all its users inside
@@ -5744,7 +5735,7 @@ void InterleavedAccessInfo::collectConst
if (!LI && !SI)
continue;
- Value *Ptr = getPointerOperand(&I);
+ Value *Ptr = getLoadStorePointerOperand(&I);
// We don't check wrapping here because we don't know yet if Ptr will be
// part of a full group or a group with gaps. Checking wrapping for all
// pointers (even those that end up in groups with no gaps) will be overly
@@ -5994,7 +5985,7 @@ void InterleavedAccessInfo::analyzeInter
// So we check only group member 0 (which is always guaranteed to exist),
// and group member Factor - 1; If the latter doesn't exist we rely on
// peeling (if it is a non-reversed access -- see Case 3).
- Value *FirstMemberPtr = getPointerOperand(Group->getMember(0));
+ Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
/*ShouldCheckWrap=*/true)) {
DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
@@ -6004,7 +5995,7 @@ void InterleavedAccessInfo::analyzeInter
}
Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
if (LastMember) {
- Value *LastMemberPtr = getPointerOperand(LastMember);
+ Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
/*ShouldCheckWrap=*/true)) {
DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
@@ -6824,7 +6815,7 @@ unsigned LoopVectorizationCostModel::get
unsigned Alignment = getMemInstAlignment(I);
unsigned AS = getMemInstAddressSpace(I);
- Value *Ptr = getPointerOperand(I);
+ Value *Ptr = getLoadStorePointerOperand(I);
Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
// Figure out whether the access is strided and get the stride value
@@ -6862,7 +6853,7 @@ unsigned LoopVectorizationCostModel::get
Type *ValTy = getMemInstValueType(I);
Type *VectorTy = ToVectorTy(ValTy, VF);
unsigned Alignment = getMemInstAlignment(I);
- Value *Ptr = getPointerOperand(I);
+ Value *Ptr = getLoadStorePointerOperand(I);
unsigned AS = getMemInstAddressSpace(I);
int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
@@ -6898,7 +6889,7 @@ unsigned LoopVectorizationCostModel::get
Type *ValTy = getMemInstValueType(I);
Type *VectorTy = ToVectorTy(ValTy, VF);
unsigned Alignment = getMemInstAlignment(I);
- Value *Ptr = getPointerOperand(I);
+ Value *Ptr = getLoadStorePointerOperand(I);
return TTI.getAddressComputationCost(VectorTy) +
TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
@@ -6982,7 +6973,7 @@ void LoopVectorizationCostModel::setCost
for (BasicBlock *BB : TheLoop->blocks()) {
// For each instruction in the old loop.
for (Instruction &I : *BB) {
- Value *Ptr = getPointerOperand(&I);
+ Value *Ptr = getLoadStorePointerOperand(&I);
if (!Ptr)
continue;
@@ -6998,7 +6989,8 @@ void LoopVectorizationCostModel::setCost
// We assume that widening is the best solution when possible.
if (memoryInstructionCanBeWidened(&I, VF)) {
unsigned Cost = getConsecutiveMemOpCost(&I, VF);
- int ConsecutiveStride = Legal->isConsecutivePtr(getPointerOperand(&I));
+ int ConsecutiveStride =
+ Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
"Expected consecutive stride.");
InstWidening Decision =
@@ -7068,7 +7060,7 @@ void LoopVectorizationCostModel::setCost
for (BasicBlock *BB : TheLoop->blocks())
for (Instruction &I : *BB) {
Instruction *PtrDef =
- dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+ dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
if (PtrDef && TheLoop->contains(PtrDef) &&
getWideningDecision(&I, VF) != CM_GatherScatter)
AddrDefs.insert(PtrDef);
@@ -7382,7 +7374,7 @@ Pass *createLoopVectorizePass(bool NoUnr
bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
// Check if the pointer operand of a load or store instruction is
// consecutive.
- if (auto *Ptr = getPointerOperand(Inst))
+ if (auto *Ptr = getLoadStorePointerOperand(Inst))
return Legal->isConsecutivePtr(Ptr);
return false;
}