[llvm] r251373 - Fix llc crash processing S/UREM for -Oz builds caused by rL250825.

Steve King via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 26 17:14:06 PDT 2015


Author: srking
Date: Mon Oct 26 19:14:06 2015
New Revision: 251373

URL: http://llvm.org/viewvc/llvm-project?rev=251373&view=rev
Log:
Fix llc crash processing S/UREM for -Oz builds caused by rL250825.

When taking the remainder of a value divided by a constant, visitREM()
attempts to convert the REM into a longer but faster sequence of
instructions. This conversion calls combine() on a speculative DIV node.
Since commit rL250825, that combine() call may return a DIVREM, corrupting
nearby nodes; flow eventually hits an unreachable().

This patch adds a test case and a check to prevent visitREM() from trying
to convert the REM instruction in cases where a DIVREM is possible.
See http://reviews.llvm.org/D14035
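
For context, the rewrite visitREM() attempts here is the usual
X % C == X - (X / C) * C expansion (see the comment in the visitREM() hunk
below), where the speculative X / C is then expected to be simplified by the
division-by-constant logic. A rough, purely illustrative sketch in IR terms
(not taken from this commit):

  ; before: a lone remainder by a constant
    %r = urem i32 %x, 10
  ; after: the equivalent X - (X / C) * C sequence built by visitREM()
    %d = udiv i32 %x, 10
    %m = mul i32 %d, 10
    %r = sub i32 %x, %m

The speculative udiv is the node handed to combine(); since rL250825 that
call can come back as a UDIVREM at -Oz, which is what left the surrounding
REM rewrite pointing at corrupted nodes.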

Added:
    llvm/trunk/test/CodeGen/X86/rem_crash.ll
Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=251373&r1=251372&r2=251373&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Mon Oct 26 19:14:06 2015
@@ -2329,8 +2329,11 @@ SDValue DAGCombiner::visitSDIV(SDNode *N
       return Op;
 
   // sdiv, srem -> sdivrem
-  if (SDValue DivRem = useDivRem(N))
-    return DivRem;
+  // If the divisor is constant, then return DIVREM only if isIntDivCheap() is true.
+  // Otherwise, we break the simplification logic in visitREM().
+  if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
+    if (SDValue DivRem = useDivRem(N))
+      return DivRem;
 
   // undef / X -> 0
   if (N0.getOpcode() == ISD::UNDEF)
@@ -2390,8 +2393,11 @@ SDValue DAGCombiner::visitUDIV(SDNode *N
       return Op;
 
   // sdiv, srem -> sdivrem
-  if (SDValue DivRem = useDivRem(N))
-    return DivRem;
+  // If the divisor is constant, then return DIVREM only if isIntDivCheap() is true.
+  // Otherwise, we break the simplification logic in visitREM().
+  if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
+    if (SDValue DivRem = useDivRem(N))
+      return DivRem;
 
   // undef / X -> 0
   if (N0.getOpcode() == ISD::UNDEF)
@@ -2448,14 +2454,24 @@ SDValue DAGCombiner::visitREM(SDNode *N)
     }
   }
 
+  AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+
   // If X/C can be simplified by the division-by-constant logic, lower
   // X%C to the equivalent of X-X/C*C.
-  if (N1C && !N1C->isNullValue()) {
+  // To avoid mangling nodes, this simplification requires that the combine()
+  // call for the speculative DIV must not cause a DIVREM conversion.  We guard
+  // against this by skipping the simplification if isIntDivCheap().  When
+  // div is not cheap, combine will not return a DIVREM.  Regardless,
+  // checking cheapness here makes sense since the simplification results in
+  // fatter code.
+  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap(VT, Attr)) {
     unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
     SDValue Div = DAG.getNode(DivOpcode, DL, VT, N0, N1);
     AddToWorklist(Div.getNode());
     SDValue OptimizedDiv = combine(Div.getNode());
     if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
+      assert((OptimizedDiv.getOpcode() != ISD::UDIVREM) &&
+             (OptimizedDiv.getOpcode() != ISD::SDIVREM));
       SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1);
       SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
       AddToWorklist(Mul.getNode());
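
The guard and assert above hinge on isIntDivCheap(): at -Oz (minsize) it is
expected to return true, so useDivRem() is allowed to fold a div/rem pair
with a constant divisor into a single DIVREM node, and visitREM() now skips
its expansion instead of fighting that. The functions in the new
rem_crash.ll test below all follow this shape; a representative sketch (the
function name here is illustrative, not one of the test's):

  define i32 @udivrem_pair(i32 %x) minsize optsize {
  entry:
    %q = udiv i32 %x, 10   ; quotient
    %r = urem i32 %x, 10   ; with the fix, visitREM() leaves this rem alone,
    %res = add i32 %q, %r  ; so the pair can become one UDIVREM node
    ret i32 %res
  }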

Added: llvm/trunk/test/CodeGen/X86/rem_crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rem_crash.ll?rev=251373&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rem_crash.ll (added)
+++ llvm/trunk/test/CodeGen/X86/rem_crash.ll Mon Oct 26 19:14:06 2015
@@ -0,0 +1,257 @@
+; RUN: llc < %s
+
+define i8 @test_minsize_uu8(i8 %x) minsize optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_minsize_ss8(i8 %x) minsize optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_minsize_us8(i8 %x) minsize optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_minsize_su8(i8 %x) minsize optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i16 @test_minsize_uu16(i16 %x) minsize optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_minsize_ss16(i16 %x) minsize optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_minsize_us16(i16 %x) minsize optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_minsize_su16(i16 %x) minsize optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i32 @test_minsize_uu32(i32 %x) minsize optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_minsize_ss32(i32 %x) minsize optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_minsize_us32(i32 %x) minsize optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_minsize_su32(i32 %x) minsize optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i64 @test_minsize_uu64(i64 %x) minsize optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_minsize_ss64(i64 %x) minsize optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_minsize_us64(i64 %x) minsize optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_minsize_su64(i64 %x) minsize optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i8 @test_uu8(i8 %x) optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_ss8(i8 %x) optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_us8(i8 %x) optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_su8(i8 %x) optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i16 @test_uu16(i16 %x) optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_ss16(i16 %x) optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_us16(i16 %x) optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_su16(i16 %x) optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i32 @test_uu32(i32 %x) optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_ss32(i32 %x) optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_us32(i32 %x) optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_su32(i32 %x) optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i64 @test_uu64(i64 %x) optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_ss64(i64 %x) optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_us64(i64 %x) optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_su64(i64 %x) optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}



