[llvm] r371558 - [Loads] Move generic code out of vectorizer into a location it might be reused [NFC]

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 10 14:33:53 PDT 2019


Author: reames
Date: Tue Sep 10 14:33:53 2019
New Revision: 371558

URL: http://llvm.org/viewvc/llvm-project?rev=371558&view=rev
Log:
[Loads] Move generic code out of vectorizer into a location it might be reused [NFC]


Modified:
    llvm/trunk/include/llvm/Analysis/Loads.h
    llvm/trunk/lib/Analysis/Loads.cpp
    llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp

Modified: llvm/trunk/include/llvm/Analysis/Loads.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/Loads.h?rev=371558&r1=371557&r2=371558&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/Loads.h (original)
+++ llvm/trunk/include/llvm/Analysis/Loads.h Tue Sep 10 14:33:53 2019
@@ -20,7 +20,9 @@
 namespace llvm {
 
 class DataLayout;
+class Loop;
 class MDNode;
+class ScalarEvolution;
 
 /// Return true if this is always a dereferenceable pointer. If the context
 /// instruction is specified perform context-sensitive analysis and return true
@@ -61,6 +63,17 @@ bool isSafeToLoadUnconditionally(Value *
                                  Instruction *ScanFrom = nullptr,
                                  const DominatorTree *DT = nullptr);
 
+/// Return true if we can prove that the given load (which is assumed to be
+/// within the specified loop) would access only dereferenceable memory, and
+/// be properly aligned on every iteration of the specified loop regardless of
+/// its placement within the loop. (i.e. does not require predication beyond
+/// that required by the header itself and could be hoisted into the header
+/// if desired.)  This is more powerful than the variants above when the
+/// address loaded from is analyzable by SCEV.
+bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
+                                       ScalarEvolution &SE,
+                                       DominatorTree &DT);
+
 /// Return true if we know that executing a load from this value cannot trap.
 ///
 /// If DT and ScanFrom are specified this method performs context-sensitive

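For readers wanting to see how the relocated helper is intended to be consumed, here is a minimal caller sketch. It is hypothetical and not part of this commit; it assumes a transform that already has ScalarEvolution and DominatorTree in hand, as the vectorizer does at the existing call site.

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper (not in-tree): returns true if every load in the
// loop could be hoisted into the header without introducing a trap, per
// the new Loads.h entry point.
static bool allLoopLoadsAreSpeculatable(Loop *L, ScalarEvolution &SE,
                                        DominatorTree &DT) {
  for (BasicBlock *BB : L->blocks())
    for (Instruction &I : *BB)
      if (auto *LI = dyn_cast<LoadInst>(&I))
        if (!isDereferenceableAndAlignedInLoop(LI, L, SE, DT))
          return false;
  return true;
}
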
Modified: llvm/trunk/lib/Analysis/Loads.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/Loads.cpp?rev=371558&r1=371557&r2=371558&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/Loads.cpp (original)
+++ llvm/trunk/lib/Analysis/Loads.cpp Tue Sep 10 14:33:53 2019
@@ -12,6 +12,9 @@
 
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/GlobalAlias.h"
@@ -190,6 +193,53 @@ static bool AreEquivalentAddressValues(c
   return false;
 }
 
+bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
+                                             ScalarEvolution &SE,
+                                             DominatorTree &DT) {
+  auto &DL = LI->getModule()->getDataLayout();
+  Value *Ptr = LI->getPointerOperand();
+  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
+  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
+    return false;
+  auto* Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
+  if (!Step)
+    return false;
+  APInt StepC = Step->getAPInt();
+  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
+                 DL.getTypeStoreSize(LI->getType()));
+  // TODO: generalize to access patterns which have gaps
+  // TODO: handle uniform addresses (if not already handled by LICM)
+  if (StepC != EltSize)
+    return false;
+
+  // TODO: If the symbolic trip count has a small bound (max count), we might
+  // be able to prove safety.
+  auto TC = SE.getSmallConstantTripCount(L);
+  if (!TC)
+    return false;
+
+  const APInt AccessSize = TC * EltSize;
+
+  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
+  if (!StartS)
+    return false;
+  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
+  Value *Base = StartS->getValue();
+
+  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
+
+  unsigned Align = LI->getAlignment();
+  if (Align == 0)
+    Align = DL.getABITypeAlignment(LI->getType());
+  // For the moment, restrict ourselves to the case where the access size is a
+  // multiple of the requested alignment and the base is aligned.
+  // TODO: generalize if a case is found which warrants it
+  if (EltSize.urem(Align) != 0)
+    return false;
+  return isDereferenceableAndAlignedPointer(Base, Align, AccessSize,
+                                            DL, HeaderFirstNonPHI, &DT);
+}
+
 /// Check if executing a load of this pointer value cannot trap.
 ///
 /// If DT and ScanFrom are specified this method performs context-sensitive

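To make the arithmetic in the new implementation concrete: for a unit-stride i32 load the addrec step and EltSize are both 4 bytes, and with a constant trip count of 100 the computed AccessSize is 100 * 4 = 400 bytes. Since EltSize (4) is a multiple of the ABI alignment (4), the final check reduces to asking whether the base pointer is dereferenceable for 400 bytes and 4-byte aligned at the loop header. A hypothetical source-level shape (not from this commit) that satisfies every step:

// Hypothetical example. The pointer operand of the load from A[i] is the
// affine addrec {A,+,4} in the loop, so StepC == EltSize == 4, TC == 100,
// and AccessSize == 400 bytes. The helper then evaluates
// isDereferenceableAndAlignedPointer(A, /*Align=*/4, /*Size=*/400, ...)
// at the loop header's first non-PHI instruction.
int sumFirstHundred(const int *A) {
  int S = 0;
  for (int i = 0; i < 100; ++i)
    S += A[i];
  return S;
}
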
Modified: llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp?rev=371558&r1=371557&r2=371558&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp Tue Sep 10 14:33:53 2019
@@ -918,57 +918,6 @@ bool LoopVectorizationLegality::blockCan
   return true;
 }
 
-/// Return true if we can prove that the given load would access only
-/// dereferenceable memory, and be properly aligned on every iteration.
-/// (i.e. does not require predication beyond that required by the the header
-/// itself) TODO: Move to Loads.h/cpp in a separate change
-static bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
-                                              ScalarEvolution &SE,
-                                              DominatorTree &DT) {
-  auto &DL = LI->getModule()->getDataLayout();
-  Value *Ptr = LI->getPointerOperand();
-  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
-  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
-    return false;
-  auto* Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
-  if (!Step)
-    return false;
-  APInt StepC = Step->getAPInt();
-  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
-                 DL.getTypeStoreSize(LI->getType()));
-  // TODO: generalize to access patterns which have gaps
-  // TODO: handle uniform addresses (if not already handled by LICM)
-  if (StepC != EltSize)
-    return false;
-
-  // TODO: If the symbolic trip count has a small bound (max count), we might
-  // be able to prove safety.
-  auto TC = SE.getSmallConstantTripCount(L);
-  if (!TC)
-    return false;
-
-  const APInt AccessSize = TC * EltSize;
-
-  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
-  if (!StartS)
-    return false;
-  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
-  Value *Base = StartS->getValue();
-
-  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
-
-  unsigned Align = LI->getAlignment();
-  if (Align == 0)
-    Align = DL.getABITypeAlignment(LI->getType());
-  // For the moment, restrict ourselves to the case where the access size is a
-  // multiple of the requested alignment and the base is aligned.
-  // TODO: generalize if a case found which warrants
-  if (EltSize.urem(Align) != 0)
-    return false;
-  return isDereferenceableAndAlignedPointer(Base, Align, AccessSize,
-                                            DL, HeaderFirstNonPHI, &DT);
-}
-
 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
   if (!EnableIfConversion) {
     reportVectorizationFailure("If-conversion is disabled",
