[llvm] r242400 - Fix memcheck interval ends for pointers with negative strides

Silviu Baranga silviu.baranga at arm.com
Thu Jul 16 07:02:59 PDT 2015


Author: sbaranga
Date: Thu Jul 16 09:02:58 2015
New Revision: 242400

URL: http://llvm.org/viewvc/llvm-project?rev=242400&view=rev
Log:
Fix memcheck interval ends for pointers with negative strides

Summary:
The checking pointer grouping algorithm assumes that the
start and end of each pointer's access range are well formed
(start <= end).

The runtime memory checking algorithm relies on the same
ordering, since it detects conflicts with:

 start0 < end1 && start1 < end0

This check is only correct if start0 <= end0 and
start1 <= end1.

This change correctly orders the interval ends by either checking
the stride (if it is constant) or by using min/max SCEV expressions.
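
To see why the ordering matters, here is a minimal standalone
sketch of the conflict test (illustrative C++, not code from
this patch; all names are made up):

  #include <algorithm>
  #include <cassert>

  // Conceptual model of the runtime check: two ranges conflict iff
  // start0 < end1 && start1 < end0, which presumes start <= end.
  struct Range { unsigned Start, End; };

  static bool mayConflict(const Range &R0, const Range &R1) {
    return R0.Start < R1.End && R1.Start < R0.End;
  }

  int main() {
    Range Fwd = {100, 200}; // forward access over [100, 200)
    Range Rev = {200, 100}; // reverse access, raw ends: start > end

    // Ill-formed ends defeat the test: both ranges cover the same
    // bytes, yet no conflict is reported (100 < 100 is false).
    assert(!mayConflict(Fwd, Rev));

    // Ordering the ends first, as this change does, restores it.
    Range RevOrdered = {std::min(Rev.Start, Rev.End),
                        std::max(Rev.Start, Rev.End)};
    assert(mayConflict(Fwd, RevOrdered));
    return 0;
  }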

Reviewers: anemet, rengolin

Subscribers: rengolin, llvm-commits

Differential Revision: http://reviews.llvm.org/D11149

Added:
    llvm/trunk/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
Modified:
    llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp

Modified: llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp?rev=242400&r1=242399&r2=242400&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/LoopAccessAnalysis.cpp Thu Jul 16 09:02:58 2015
@@ -127,9 +127,27 @@ void RuntimePointerChecking::insert(Loop
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
   assert(AR && "Invalid addrec expression");
   const SCEV *Ex = SE->getBackedgeTakenCount(Lp);
+
+  const SCEV *ScStart = AR->getStart();
   const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
-  Pointers.emplace_back(Ptr, AR->getStart(), ScEnd, WritePtr, DepSetId, ASId,
-                        Sc);
+  const SCEV *Step = AR->getStepRecurrence(*SE);
+
+  // For expressions with negative step, the upper bound is ScStart and the
+  // lower bound is ScEnd.
+  if (const SCEVConstant *CStep = dyn_cast<const SCEVConstant>(Step)) {
+    if (CStep->getValue()->isNegative())
+      std::swap(ScStart, ScEnd);
+  } else {
+    // Fallback case: the step is not constant, but we can still
+    // get the upper and lower bounds of the interval by using min/max
+    // expressions.
+    ScStart = SE->getUMinExpr(ScStart, ScEnd);
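+    // ScStart was overwritten just above, so re-read the original
+    // start of the recurrence via AR->getStart() for the umax.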
+    ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
+  }
+
+  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
 }
 
 bool RuntimePointerChecking::needsChecking(
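
As a simplified integer model of the new logic (a hypothetical
helper for illustration only; the committed code builds SCEV
expressions via SE->getUMinExpr/SE->getUMaxExpr instead):

  #include <algorithm>
  #include <cstdint>
  #include <optional>
  #include <utility>

  // Given the addrec value at iteration 0 (Start), its value at the
  // last iteration (End), and the step when it is a compile-time
  // constant, return ordered (Low, High) interval ends.
  static std::pair<int64_t, int64_t>
  orderBounds(int64_t Start, int64_t End, std::optional<int64_t> Step) {
    if (Step) {
      // Constant negative step: the recurrence ends below where it
      // starts, so swapping the two ends is enough.
      if (*Step < 0)
        std::swap(Start, End);
      return {Start, End};
    }
    // Unknown step: take min/max of the two candidate ends, the
    // integer analogue of the umin/umax SCEV fallback.
    return {std::min(Start, End), std::max(Start, End)};
  }

For the 'f' test below, A[15000 - i] with i32 elements is the
addrec {%a + 60000,+,-4}. With a backedge-taken count of 10000 the
raw ends are (%a + 60000, %a + 20000); the swap then produces the
(Low: 20000 + %a, High: 60000 + %a) pair that the test expects.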

Added: llvm/trunk/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll?rev=242400&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll (added)
+++ llvm/trunk/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll Thu Jul 16 09:02:58 2015
@@ -0,0 +1,89 @@
+; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
+
+; The runtime memory check code and the access grouping
+; algorithm both assume that the start and end values
+; for an access range are ordered (start <= end).
+; When generating checks for accesses with negative stride
+; we need to take this into account and swap the interval
+; ends.
+;
+;   for (i = 0; i < 10000; i++) {
+;     B[i] = A[15000 - i] * 3;
+;   }
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnueabi"
+
+; CHECK: function 'f':
+; CHECK: (Low: (20000 + %a) High: (60000 + %a))
+
+@B = common global i32* null, align 8
+@A = common global i32* null, align 8
+
+define void @f() {
+entry:
+  %a = load i32*, i32** @A, align 8
+  %b = load i32*, i32** @B, align 8
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
+  %negidx = sub i64 15000, %idx
+
+  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
+  %loadA0 = load i32, i32* %arrayidxA0, align 2
+
+  %res = mul i32 %loadA0, 3
+
+  %add = add nuw nsw i64 %idx, 1
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
+  store i32 %res, i32* %arrayidxB, align 2
+
+  %exitcond = icmp eq i64 %idx, 10000
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
+
+; CHECK: function 'g':
+; When the stride is not constant, we are forced to use umin/umax
+; expressions to get the interval limits.
+
+;   for (i = 0; i < 10000; i++) {
+;     B[i] = A[15000 - step * i] * 3;
+;   }
+
+; Here it is not obvious what the limits are, since 'step' could be negative.
+
+; CHECK: Low: (-1 + (-1 * ((-60001 + (-1 * %a)) umax (-60001 + (40000 * %step) + (-1 * %a)))))
+; CHECK: High: ((60000 + %a) umax (60000 + (-40000 * %step) + %a))
+
+define void @g(i64 %step) {
+entry:
+  %a = load i32*, i32** @A, align 8
+  %b = load i32*, i32** @B, align 8
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %idx = phi i64 [ 0, %entry ], [ %add, %for.body ]
+  %idx_mul = mul i64 %idx, %step
+  %negidx = sub i64 15000, %idx_mul
+
+  %arrayidxA0 = getelementptr inbounds i32, i32* %a, i64 %negidx
+  %loadA0 = load i32, i32* %arrayidxA0, align 2
+
+  %res = mul i32 %loadA0, 3
+
+  %add = add nuw nsw i64 %idx, 1
+
+  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %idx
+  store i32 %res, i32* %arrayidxB, align 2
+
+  %exitcond = icmp eq i64 %idx, 10000
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
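
For reference, the bounds printed for 'g' can be derived by hand.
The access is the addrec {%a + 60000,+,(-4 * %step)} and the
backedge-taken count is 10000, so the two candidate ends are:

  End(0)     = (60000 + %a)
  End(10000) = (60000 + (-40000 * %step) + %a)

High is their umax verbatim. Low is their umin, which
ScalarEvolution expresses as not(not(x) umax not(y)) with
not(x) = (-1 + (-1 * x)); e.g. not(60000 + %a) =
(-60001 + (-1 * %a)), which is exactly the form that appears
in the Low CHECK line above.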




