[llvm] r247184 - ScalarEvolution assume hanging bugfix

Piotr Padlewski via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 9 13:47:30 PDT 2015


Author: prazek
Date: Wed Sep  9 15:47:30 2015
New Revision: 247184

URL: http://llvm.org/viewvc/llvm-project?rev=247184&view=rev
Log:
ScalarEvolution assume hanging bugfix

http://reviews.llvm.org/D12719

Added:
    llvm/trunk/test/Analysis/ScalarEvolution/avoid-assume-hang.ll
Modified:
    llvm/trunk/lib/Analysis/ScalarEvolution.cpp

Modified: llvm/trunk/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolution.cpp?rev=247184&r1=247183&r2=247184&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/trunk/lib/Analysis/ScalarEvolution.cpp Wed Sep  9 15:47:30 2015
@@ -6958,18 +6958,6 @@ ScalarEvolution::isLoopBackedgeGuardedBy
                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
     return true;
 
-  // Check conditions due to any @llvm.assume intrinsics.
-  for (auto &AssumeVH : AC.assumptions()) {
-    if (!AssumeVH)
-      continue;
-    auto *CI = cast<CallInst>(AssumeVH);
-    if (!DT.dominates(CI, Latch->getTerminator()))
-      continue;
-
-    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
-      return true;
-  }
-
   struct ClearWalkingBEDominatingCondsOnExit {
     ScalarEvolution &SE;
 
@@ -6981,7 +6969,7 @@ ScalarEvolution::isLoopBackedgeGuardedBy
     }
   };
 
-  // We don't want more than one activation of the following loop on the stack
+  // We don't want more than one activation of the following loops on the stack
   // -- that can lead to O(n!) time complexity.
   if (WalkingBEDominatingConds)
     return false;
@@ -6989,6 +6977,18 @@ ScalarEvolution::isLoopBackedgeGuardedBy
   WalkingBEDominatingConds = true;
   ClearWalkingBEDominatingCondsOnExit ClearOnExit(*this);
 
+  // Check conditions due to any @llvm.assume intrinsics.
+  for (auto &AssumeVH : AC.assumptions()) {
+    if (!AssumeVH)
+      continue;
+    auto *CI = cast<CallInst>(AssumeVH);
+    if (!DT.dominates(CI, Latch->getTerminator()))
+      continue;
+
+    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
+      return true;
+  }
+
   // If the loop is not reachable from the entry block, we risk running into an
   // infinite loop as we walk up into the dom tree.  These loops do not matter
   // anyway, so we just return a conservative answer when we see them.

Added: llvm/trunk/test/Analysis/ScalarEvolution/avoid-assume-hang.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/avoid-assume-hang.ll?rev=247184&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/avoid-assume-hang.ll (added)
+++ llvm/trunk/test/Analysis/ScalarEvolution/avoid-assume-hang.ll Wed Sep  9 15:47:30 2015
@@ -0,0 +1,139 @@
+; RUN: opt %s -always-inline | opt -analyze -scalar-evolution
+; There was an optimization bug in ScalarEvolution that caused excessively
+; long computation time and a stack overflow crash.
+
+declare void @body(i32)
+declare void @llvm.assume(i1)
+
+define available_externally void @assume1(i64 %i.ext, i64 %a) alwaysinline {
+  %cmp0 = icmp ne i64 %i.ext, %a
+  call void @llvm.assume(i1 %cmp0)
+
+  %a1 = add i64 %a, 1
+  %cmp1 = icmp ne i64 %i.ext, %a1
+  call void @llvm.assume(i1 %cmp1)
+
+  %a2 = add i64 %a1, 1
+  %cmp2 = icmp ne i64 %i.ext, %a2
+  call void @llvm.assume(i1 %cmp2)
+
+  %a3 = add i64 %a2, 1
+  %cmp3 = icmp ne i64 %i.ext, %a3
+  call void @llvm.assume(i1 %cmp3)
+
+  %a4 = add i64 %a3, 1
+  %cmp4 = icmp ne i64 %i.ext, %a4
+  call void @llvm.assume(i1 %cmp4)
+
+  ret void
+}
+
+define available_externally void @assume2(i64 %i.ext, i64 %a) alwaysinline {
+  call void @assume1(i64 %i.ext, i64 %a)
+
+  %a1 = add i64 %a, 5
+  %cmp1 = icmp ne i64 %i.ext, %a1
+  call void @assume1(i64 %i.ext, i64 %a1)
+
+  %a2 = add i64 %a1, 5
+  %cmp2 = icmp ne i64 %i.ext, %a2
+  call void @assume1(i64 %i.ext, i64 %a2)
+
+  %a3 = add i64 %a2, 5
+  %cmp3 = icmp ne i64 %i.ext, %a3
+  call void @assume1(i64 %i.ext, i64 %a3)
+
+  %a4 = add i64 %a3, 5
+  %cmp4 = icmp ne i64 %i.ext, %a4
+  call void @assume1(i64 %i.ext, i64 %a4)
+
+  ret void
+}
+
+define available_externally void @assume3(i64 %i.ext, i64 %a) alwaysinline {
+  call void @assume2(i64 %i.ext, i64 %a)
+
+  %a1 = add i64 %a, 25
+  %cmp1 = icmp ne i64 %i.ext, %a1
+  call void @assume2(i64 %i.ext, i64 %a1)
+
+  %a2 = add i64 %a1, 25
+  %cmp2 = icmp ne i64 %i.ext, %a2
+  call void @assume2(i64 %i.ext, i64 %a2)
+
+  %a3 = add i64 %a2, 25
+  %cmp3 = icmp ne i64 %i.ext, %a3
+  call void @assume2(i64 %i.ext, i64 %a3)
+
+  %a4 = add i64 %a3, 25
+  %cmp4 = icmp ne i64 %i.ext, %a4
+  call void @assume2(i64 %i.ext, i64 %a4)
+
+  ret void
+}
+
+define available_externally void @assume4(i64 %i.ext, i64 %a) alwaysinline {
+  call void @assume3(i64 %i.ext, i64 %a)
+
+  %a1 = add i64 %a, 125
+  %cmp1 = icmp ne i64 %i.ext, %a1
+  call void @assume3(i64 %i.ext, i64 %a1)
+
+  %a2 = add i64 %a1, 125
+  %cmp2 = icmp ne i64 %i.ext, %a2
+  call void @assume3(i64 %i.ext, i64 %a2)
+
+  %a3 = add i64 %a2, 125
+  %cmp3 = icmp ne i64 %i.ext, %a3
+  call void @assume3(i64 %i.ext, i64 %a3)
+
+  %a4 = add i64 %a3, 125
+  %cmp4 = icmp ne i64 %i.ext, %a4
+  call void @assume3(i64 %i.ext, i64 %a4)
+
+  ret void
+}
+
+define available_externally void @assume5(i64 %i.ext, i64 %a) alwaysinline {
+  call void @assume4(i64 %i.ext, i64 %a)
+
+  %a1 = add i64 %a, 625
+  %cmp1 = icmp ne i64 %i.ext, %a1
+  call void @assume4(i64 %i.ext, i64 %a1)
+
+  %a2 = add i64 %a1, 625
+  %cmp2 = icmp ne i64 %i.ext, %a2
+  call void @assume4(i64 %i.ext, i64 %a2)
+
+  %a3 = add i64 %a2, 625
+  %cmp3 = icmp ne i64 %i.ext, %a3
+  call void @assume4(i64 %i.ext, i64 %a3)
+
+  %a4 = add i64 %a3, 625
+  %cmp4 = icmp ne i64 %i.ext, %a4
+  call void @assume4(i64 %i.ext, i64 %a4)
+
+  ret void
+}
+
+define void @fn(i32 %init) {
+entry:
+  br label %loop
+
+loop:
+  %i = phi i32 [%init, %entry], [%next, %loop]
+  call void @body(i32 %i)
+
+  %i.ext = zext i32 %i to i64
+
+  call void @assume5(i64 %i.ext, i64 500000000)
+
+  %i.next = add i64 %i.ext, 1
+  %next = trunc i64 %i.next to i32
+  %done = icmp eq i32 %i, 500000000
+
+  br i1 %done, label %exit, label %loop
+
+exit:
+  ret void
+}
\ No newline at end of file




More information about the llvm-commits mailing list