[llvm] r358688 - [LoopPred] Fix a blatantly obvious bug in r358684
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 18 10:01:19 PDT 2019
Author: reames
Date: Thu Apr 18 10:01:19 2019
New Revision: 358688
URL: http://llvm.org/viewvc/llvm-project?rev=358688&view=rev
Log:
[LoopPred] Fix a blatantly obvious bug in r358684
The bug is that I didn't check whether the operands of the invariant_loads were themselves loop invariant. I don't know how this got missed in the patch and review. I even had an unreduced test case locally, and I remember handling this case, but I must have lost it in one of the rebases. Oops.
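To spell out the reasoning: !invariant.load only promises that the memory at the loaded address is immutable; it does not make the address itself loop invariant. If the pointer operand varies with the induction variable, the loaded value can differ on every iteration. The corrected check therefore needs both conditions; annotated sketch below (the member names L and AA are as used in the file, but this is a paraphrase, not the verbatim source -- see the diff for the actual change):

    // A load may be treated as a loop-invariant value only if:
    //  (1) it is an unordered (simple, non-atomic-ordered) load that can
    //      safely be re-executed,
    //  (2) every operand, in particular the address, is loop invariant,
    //  (3) the memory it reads is known immutable, either because AA proves
    //      it points to constant memory or it carries !invariant.load.
    if (LI->isUnordered() &&                                  // (1)
        L->hasLoopInvariantOperands(LI) &&                    // (2)
        (AA->pointsToConstantMemory(LI->getOperand(0)) ||     // (3)
         LI->getMetadata(LLVMContext::MD_invariant_load)))
      return true;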
Modified:
llvm/trunk/lib/Transforms/Scalar/LoopPredication.cpp
llvm/trunk/test/Transforms/LoopPredication/invariant_load.ll
Modified: llvm/trunk/lib/Transforms/Scalar/LoopPredication.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopPredication.cpp?rev=358688&r1=358687&r2=358688&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopPredication.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopPredication.cpp Thu Apr 18 10:01:19 2019
@@ -510,7 +510,7 @@ bool LoopPredication::isLoopInvariantVal
   // TODO: This should be sunk inside SCEV.
   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
     if (const auto *LI = dyn_cast<LoadInst>(U->getValue()))
-      if (LI->isUnordered())
+      if (LI->isUnordered() && L->hasLoopInvariantOperands(LI))
         if (AA->pointsToConstantMemory(LI->getOperand(0)) ||
             LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
           return true;
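For reference, Loop::hasLoopInvariantOperands simply asks Loop::isLoopInvariant of every operand; roughly like this (paraphrased from LoopInfo, not the verbatim source):

    // A value is loop invariant iff it is not defined by an instruction
    // inside the loop; constants and function arguments trivially qualify.
    bool Loop::isLoopInvariant(const Value *V) const {
      if (const auto *I = dyn_cast<Instruction>(V))
        return !contains(I);
      return true;
    }
    // An instruction has loop-invariant operands iff every operand passes
    // the test above.
    bool Loop::hasLoopInvariantOperands(const Instruction *I) const {
      return all_of(I->operands(),
                    [&](const Value *V) { return isLoopInvariant(V); });
    }

In the new test below, the pointer operand %length.addr is a GEP defined inside the loop, so this returns false and the guard is correctly left alone.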
Modified: llvm/trunk/test/Transforms/LoopPredication/invariant_load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopPredication/invariant_load.ll?rev=358688&r1=358687&r2=358688&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoopPredication/invariant_load.ll (original)
+++ llvm/trunk/test/Transforms/LoopPredication/invariant_load.ll Thu Apr 18 10:01:19 2019
@@ -126,6 +126,70 @@ exit:
ret i32 %result
}
+; Case where we have an invariant load, but it's not loading from a loop
+; invariant location.
+define i32 @neg_varying_invariant_load_op(i32* %array, i32* %lengths, i32 %n) {
+; CHECK-LABEL: @neg_varying_invariant_load_op(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[N:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP5]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
+; CHECK: loop.preheader:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[LOOP_ACC:%.*]] = phi i32 [ [[LOOP_ACC_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_NEXT:%.*]], [[LOOP]] ], [ 0, [[LOOP_PREHEADER]] ]
+; CHECK-NEXT: [[UNKNOWN:%.*]] = load volatile i1, i1* @UNKNOWN
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[UNKNOWN]]) [ "deopt"() ]
+; CHECK-NEXT: [[LENGTH_ADDR:%.*]] = getelementptr i32, i32* [[LENGTHS:%.*]], i32 [[I]]
+; CHECK-NEXT: [[LEN:%.*]] = load i32, i32* [[LENGTH_ADDR]], align 4, !invariant.load !0
+; CHECK-NEXT: [[WITHIN_BOUNDS:%.*]] = icmp ult i32 [[I]], [[LEN]]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[WITHIN_BOUNDS]], i32 9) [ "deopt"() ]
+; CHECK-NEXT: [[I_I64:%.*]] = zext i32 [[I]] to i64
+; CHECK-NEXT: [[ARRAY_I_PTR:%.*]] = getelementptr inbounds i32, i32* [[ARRAY:%.*]], i64 [[I_I64]]
+; CHECK-NEXT: [[ARRAY_I:%.*]] = load i32, i32* [[ARRAY_I_PTR]], align 4
+; CHECK-NEXT: [[LOOP_ACC_NEXT]] = add i32 [[LOOP_ACC]], [[ARRAY_I]]
+; CHECK-NEXT: [[I_NEXT]] = add nuw i32 [[I]], 1
+; CHECK-NEXT: [[CONTINUE:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[CONTINUE]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]]
+; CHECK: exit.loopexit:
+; CHECK-NEXT: [[LOOP_ACC_NEXT_LCSSA:%.*]] = phi i32 [ [[LOOP_ACC_NEXT]], [[LOOP]] ]
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LOOP_ACC_NEXT_LCSSA]], [[EXIT_LOOPEXIT]] ]
+; CHECK-NEXT: ret i32 [[RESULT]]
+;
+entry:
+ %tmp5 = icmp eq i32 %n, 0
+ br i1 %tmp5, label %exit, label %loop.preheader
+
+loop.preheader:
+ br label %loop
+
+loop:
+ %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ]
+ %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ]
+ %unknown = load volatile i1, i1* @UNKNOWN
+ call void (i1, ...) @llvm.experimental.guard(i1 %unknown) [ "deopt"() ]
+
+ %length.addr = getelementptr i32, i32* %lengths, i32 %i
+ %len = load i32, i32* %length.addr, align 4, !invariant.load !{}
+ %within.bounds = icmp ult i32 %i, %len
+ call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ]
+
+ %i.i64 = zext i32 %i to i64
+ %array.i.ptr = getelementptr inbounds i32, i32* %array, i64 %i.i64
+ %array.i = load i32, i32* %array.i.ptr, align 4
+ %loop.acc.next = add i32 %loop.acc, %array.i
+
+ %i.next = add nuw i32 %i, 1
+ %continue = icmp ult i32 %i.next, %n
+ br i1 %continue, label %loop, label %exit
+
+exit:
+ %result = phi i32 [ 0, %entry ], [ %loop.acc.next, %loop ]
+ ret i32 %result
+}
+
; This is a case where moving the load which provides the limit for the latch
; would be invalid, so we can't perform the tempting transform. Loading the
; latch limit may fault since we could always fail the guard.
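(To reproduce locally: the test is normally driven by llvm-lit via the file's RUN line, which is not shown in this diff; running something like `opt -S -loop-predication < invariant_load.ll | FileCheck invariant_load.ll` by hand should exercise the same checks.)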