[llvm] r294892 - [LV/LoopAccess] Check statically if an unknown dependence distance can be

Dorit Nuzman via llvm-commits llvm-commits at lists.llvm.org
Sun Feb 12 01:32:54 PST 2017


Author: dorit
Date: Sun Feb 12 03:32:53 2017
New Revision: 294892

URL: http://llvm.org/viewvc/llvm-project?rev=294892&view=rev
Log:
[LV/LoopAccess] Check statically if an unknown dependence distance can be 
proven larger than the loop-count

This fixes PR31098: Try to statically resolve data dependences whose
compile-time-unknown distance can be proven larger than the loop-count,
instead of resorting to runtime dependence checking (which is not always
possible).

For vectorization it is sufficient to prove that the dependence distance 
is >= VF; But in some cases we can prune unknown dependence distances early,
and even before selecting the VF, and without a runtime test, by comparing 
the distance against the loop iteration count. Since the vectorized code 
will be executed only if LoopCount >= VF, proving distance >= LoopCount 
also guarantees that distance >= VF. This check is also equivalent to the 
Strong SIV Test.

Reviewers: mkuper, anemet, sanjoy

Differential Revision: https://reviews.llvm.org/D28044


Added:
    llvm/trunk/test/Analysis/LoopAccessAnalysis/pr31098.ll
    llvm/trunk/test/Transforms/LoopVectorize/pr31098.ll
Modified:
    llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp
    llvm/trunk/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
    llvm/trunk/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll

Modified: llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp?rev=294892&r1=294891&r2=294892&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp Sun Feb 12 03:32:53 2017
@@ -1251,6 +1251,73 @@ bool MemoryDepChecker::couldPreventStore
   return false;
 }
 
+/// Given a non-constant (unknown) dependence-distance \p Dist between two
+/// memory accesses that have the same stride whose absolute value is given
+/// in \p Stride, and that have the same type size \p TypeByteSize,
+/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it is
+/// possible to prove statically that the dependence distance is larger
+/// than the range that the accesses will travel through the execution of
+/// the loop. If so, return true; false otherwise. This is useful for
+/// example in loops such as the following (PR31098):
+///     for (i = 0; i < D; ++i) {
+///                = out[i];
+///       out[i+D] =
+///     }
+static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
+                                     const SCEV &BackedgeTakenCount,
+                                     const SCEV &Dist, uint64_t Stride,
+                                     uint64_t TypeByteSize) {
+
+  // If we can prove that
+  //      (**) |Dist| > BackedgeTakenCount * Step
+  // where Step is the absolute stride of the memory accesses in bytes, 
+  // then there is no dependence.
+  //
+  // Rationale:
+  // We basically want to check if the absolute distance (|Dist/Step|) 
+  // is >= the loop iteration count (or > BackedgeTakenCount). 
+  // This is equivalent to the Strong SIV Test (Practical Dependence Testing, 
+  // Section 4.2.1); Note, that for vectorization it is sufficient to prove 
+  // that the dependence distance is >= VF; This is checked elsewhere.
+  // But in some cases we can prune unknown dependence distances early, and 
+  // even before selecting the VF, and without a runtime test, by comparing 
+  // the distance against the loop iteration count. Since the vectorized code 
+  // will be executed only if LoopCount >= VF, proving distance >= LoopCount 
+  // also guarantees that distance >= VF.
+  //
+  const uint64_t ByteStride = Stride * TypeByteSize;
+  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
+  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
+
+  const SCEV *CastedDist = &Dist;
+  const SCEV *CastedProduct = Product;
+  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
+  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());
+
+  // The dependence distance can be positive/negative, so we sign extend Dist;
+  // The multiplication of the absolute stride in bytes and the
+  // BackedgeTakenCount is non-negative, so we zero extend Product.
+  if (DistTypeSize > ProductTypeSize)
+    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
+  else
+    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
+
+  // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
+  // (If so, then we have proven (**) because |Dist| >= Dist)
+  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
+  if (SE.isKnownPositive(Minus))
+    return true;
+
+  // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
+  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
+  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
+  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
+  if (SE.isKnownPositive(Minus))
+    return true;
+
+  return false;
+}
+
 /// \brief Check the dependence for two accesses with the same stride \p Stride.
 /// \p Distance is the positive distance and \p TypeByteSize is type size in
 /// bytes.
@@ -1338,21 +1405,26 @@ MemoryDepChecker::isDependent(const MemA
     return Dependence::Unknown;
   }
 
+  Type *ATy = APtr->getType()->getPointerElementType();
+  Type *BTy = BPtr->getType()->getPointerElementType();
+  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
+  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
+  uint64_t Stride = std::abs(StrideAPtr);
   const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
   if (!C) {
+    if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
+        isSafeDependenceDistance(DL, *(PSE.getSE()),
+                                 *(PSE.getBackedgeTakenCount()), *Dist, Stride,
+                                 TypeByteSize))
+      return Dependence::NoDep;
+
     DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
     ShouldRetryWithRuntimeCheck = true;
     return Dependence::Unknown;
   }
 
-  Type *ATy = APtr->getType()->getPointerElementType();
-  Type *BTy = BPtr->getType()->getPointerElementType();
-  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
-  uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
-
   const APInt &Val = C->getAPInt();
   int64_t Distance = Val.getSExtValue();
-  uint64_t Stride = std::abs(StrideAPtr);
 
   // Attempt to prove strided accesses independent.
   if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&

Modified: llvm/trunk/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll?rev=294892&r1=294891&r2=294892&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll (original)
+++ llvm/trunk/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll Sun Feb 12 03:32:53 2017
@@ -13,9 +13,9 @@
 ;	int v3[Z][Z];
 ; } s;
 ;
-; void slow_function (s* const obj) {
+; void slow_function (s* const obj, int z) {
 ;    for (int j=0; j<Z; j++) {
-;        for (int k=0; k<Z; k++) {
+;        for (int k=0; k<z; k++) {
 ;            int x = obj->v1[k] + obj->v2[j];
 ;            obj->v3[j][k] += x;
 ;        }
@@ -35,7 +35,7 @@ target triple = "x86_64-unknown-linux-gn
 
 %struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] }
 
-define void @Test(%struct.s* nocapture %obj) #0 {
+define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
   br label %.outer.preheader
 
 
@@ -63,6 +63,6 @@ define void @Test(%struct.s* nocapture %
   %8 = add nsw i32 %5, %7
   store i32 %8, i32* %6  
   %j.next = add nuw nsw i64 %j, 1
-  %exitcond.inner = icmp eq i64 %j.next, 32
+  %exitcond.inner = icmp eq i64 %j.next, %z
   br i1 %exitcond.inner, label %.outer, label %.inner
 }

Added: llvm/trunk/test/Analysis/LoopAccessAnalysis/pr31098.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/LoopAccessAnalysis/pr31098.ll?rev=294892&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/LoopAccessAnalysis/pr31098.ll (added)
+++ llvm/trunk/test/Analysis/LoopAccessAnalysis/pr31098.ll Sun Feb 12 03:32:53 2017
@@ -0,0 +1,99 @@
+; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
+; RUN: opt -passes='require<scalar-evolution>,require<aa>,loop(print-access-info)' -disable-output  < %s 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that the compile-time-unknown dependence-distance is resolved
+; statically. Due to the non-unit stride of the accesses in this testcase
+; we are currently not able to create runtime dependence checks, and therefore
+; if we don't resolve the dependence statically we cannot vectorize the loop.
+;
+; Specifically in this example, during dependence analysis we get 6 unknown 
+; dependence distances between the 8 real/imaginary accesses below: 
+;    dist = 8*D, 4+8*D, -4+8*D, -8*D, 4-8*D, -4-8*D.
+; At compile time we can prove for all of the above that |dist|>loopBound*step
+; (where the step is 8bytes, and the loopBound is D-1), and thereby conclude 
+; that there are no dependencies (without runtime tests):
+; |8*D|>8*D-8, |4+8*D|>8*D-8, |-4+8*D|>8*D-8, etc.
+
+; #include <stdlib.h>
+; class Complex {
+; private:
+;   float real_;
+;   float imaginary_;
+;
+; public:
+;   Complex() : real_(0), imaginary_(0) { }
+;   Complex(float real, float imaginary) : real_(real), imaginary_(imaginary) { }
+;   Complex(const Complex &rhs) : real_(rhs.real()), imaginary_(rhs.imaginary()) { }
+; 
+;   inline float real() const { return real_; }
+;   inline float imaginary() const { return imaginary_; }
+; 
+;   Complex operator+(const Complex& rhs) const
+;   {
+;    return Complex(real_ + rhs.real_, imaginary_ + rhs.imaginary_);
+;   }
+;
+;   Complex operator-(const Complex& rhs) const
+;  {
+;     return Complex(real_ - rhs.real_, imaginary_ - rhs.imaginary_);
+;   }
+; };
+;
+; void Test(Complex *out, size_t size)
+; {
+;     size_t D = size / 2;
+;     for (size_t offset = 0; offset < D; ++offset)
+;     {
+;         Complex t0 = out[offset];
+;         Complex t1 = out[offset + D];
+;         out[offset] = t1 + t0;
+;         out[offset + D] = t0 - t1;
+;     }
+; }
+
+; CHECK-LABEL: Test
+; CHECK: Memory dependences are safe
+
+
+%class.Complex = type { float, float }
+
+define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr {
+entry:
+  %div = lshr i64 %size, 1
+  %cmp47 = icmp eq i64 %div, 0
+  br i1 %cmp47, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  br label %for.body
+
+for.cond.cleanup.loopexit:
+  br label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+  %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0
+  %1 = load float, float* %0, align 4
+  %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1
+  %2 = load float, float* %imaginary_.i.i, align 4
+  %add = add nuw i64 %offset.048, %div
+  %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0
+  %4 = load float, float* %3, align 4
+  %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1
+  %5 = load float, float* %imaginary_.i.i28, align 4
+  %add.i = fadd fast float %4, %1
+  %add4.i = fadd fast float %5, %2
+  store float %add.i, float* %0, align 4
+  store float %add4.i, float* %imaginary_.i.i, align 4
+  %sub.i = fsub fast float %1, %4
+  %sub4.i = fsub fast float %2, %5
+  store float %sub.i, float* %3, align 4
+  store float %sub4.i, float* %imaginary_.i.i28, align 4
+  %inc = add nuw nsw i64 %offset.048, 1
+  %exitcond = icmp eq i64 %inc, %div
+  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}

Modified: llvm/trunk/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll?rev=294892&r1=294891&r2=294892&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll (original)
+++ llvm/trunk/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll Sun Feb 12 03:32:53 2017
@@ -13,9 +13,9 @@
 ;       int v3[Z][Z];
 ; } s;
 ;
-; void slow_function (s* const obj) {
+; void slow_function (s* const obj, int z) {
 ;    for (int j=0; j<Z; j++) {
-;        for (int k=0; k<Z; k++) {
+;        for (int k=0; k<z; k++) {
 ;            int x = obj->v1[k] + obj->v2[j];
 ;            obj->v3[j][k] += x;
 ;        }
@@ -31,7 +31,7 @@ target datalayout = "e-m:e-i64:64-f80:12
 
 %struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] }
 
-define void @Test(%struct.s* nocapture %obj) #0 {
+define void @Test(%struct.s* nocapture %obj, i64 %z) #0 {
   br label %.outer.preheader
 
 
@@ -59,6 +59,6 @@ define void @Test(%struct.s* nocapture %
   %8 = add nsw i32 %5, %7
   store i32 %8, i32* %6  
   %j.next = add nuw nsw i64 %j, 1
-  %exitcond.inner = icmp eq i64 %j.next, 32
+  %exitcond.inner = icmp eq i64 %j.next, %z
   br i1 %exitcond.inner, label %.outer, label %.inner
 }

Added: llvm/trunk/test/Transforms/LoopVectorize/pr31098.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/pr31098.ll?rev=294892&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/pr31098.ll (added)
+++ llvm/trunk/test/Transforms/LoopVectorize/pr31098.ll Sun Feb 12 03:32:53 2017
@@ -0,0 +1,100 @@
+; REQUIRES: asserts
+; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true -debug-only=loop-accesses < %s  2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that the compile-time-unknown dependence-distance is resolved
+; statically. Due to the non-unit stride of the accesses in this testcase
+; we are currently not able to create runtime dependence checks, and therefore
+; if we don't resolve the dependence statically we cannot vectorize the loop.
+;
+; Specifically in this example, during dependence analysis we get 6 unknown 
+; dependence distances between the 8 real/imaginary accesses below: 
+;    dist = 8*D, 4+8*D, -4+8*D, -8*D, 4-8*D, -4-8*D.
+; At compile time we can prove for all of the above that |dist|>loopBound*step
+; (where the step is 8bytes, and the loopBound is D-1), and thereby conclude 
+; that there are no dependencies (without runtime tests):
+; |8*D|>8*D-8, |4+8*D|>8*D-8, |-4+8*D|>8*D-8, etc.
+
+; #include <stdlib.h>
+; class Complex {
+; private:
+;   float real_;
+;   float imaginary_;
+;
+; public:
+;   Complex() : real_(0), imaginary_(0) { }
+;   Complex(float real, float imaginary) : real_(real), imaginary_(imaginary) { }
+;   Complex(const Complex &rhs) : real_(rhs.real()), imaginary_(rhs.imaginary()) { }
+; 
+;   inline float real() const { return real_; }
+;   inline float imaginary() const { return imaginary_; }
+; 
+;   Complex operator+(const Complex& rhs) const
+;   {
+;    return Complex(real_ + rhs.real_, imaginary_ + rhs.imaginary_);
+;   }
+;
+;   Complex operator-(const Complex& rhs) const
+;  {
+;     return Complex(real_ - rhs.real_, imaginary_ - rhs.imaginary_);
+;   }
+; };
+;
+; void Test(Complex *out, size_t size)
+; {
+;     size_t D = size / 2;
+;     for (size_t offset = 0; offset < D; ++offset)
+;     {
+;         Complex t0 = out[offset];
+;         Complex t1 = out[offset + D];
+;         out[offset] = t1 + t0;
+;         out[offset + D] = t0 - t1;
+;     }
+; }
+
+; CHECK-LABEL: Test
+; CHECK: LAA: No unsafe dependent memory operations in loop.  We don't need runtime memory checks.
+; CHECK: vector.body:
+; CHECK: <4 x i32>
+
+%class.Complex = type { float, float }
+
+define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr {
+entry:
+  %div = lshr i64 %size, 1
+  %cmp47 = icmp eq i64 %div, 0
+  br i1 %cmp47, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+  br label %for.body
+
+for.cond.cleanup.loopexit:
+  br label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
+  %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0
+  %1 = load float, float* %0, align 4
+  %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1
+  %2 = load float, float* %imaginary_.i.i, align 4
+  %add = add nuw i64 %offset.048, %div
+  %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0
+  %4 = load float, float* %3, align 4
+  %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1
+  %5 = load float, float* %imaginary_.i.i28, align 4
+  %add.i = fadd fast float %4, %1
+  %add4.i = fadd fast float %5, %2
+  store float %add.i, float* %0, align 4
+  store float %add4.i, float* %imaginary_.i.i, align 4
+  %sub.i = fsub fast float %1, %4
+  %sub4.i = fsub fast float %2, %5
+  store float %sub.i, float* %3, align 4
+  store float %sub4.i, float* %imaginary_.i.i28, align 4
+  %inc = add nuw nsw i64 %offset.048, 1
+  %exitcond = icmp eq i64 %inc, %div
+  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}




More information about the llvm-commits mailing list