[llvm] r351057 - [DAGCombiner] Add add saturation constant folding tests.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 14 04:12:42 PST 2019


Author: rksimon
Date: Mon Jan 14 04:12:42 2019
New Revision: 351057

URL: http://llvm.org/viewvc/llvm-project?rev=351057&view=rev
Log:
[DAGCombiner] Add add saturation constant folding tests.

These tests expose an issue with computeOverflowKind for sadd_sat, so I've disabled that fold for the signed case for now.
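
Aside, my reading rather than anything stated in the patch: computeOverflowKind
appears to reason about unsigned wraparound, so for sadd_sat(0x7FFFFFFF, 100) it
can report OFK_Never - the unsigned 32-bit sum does not wrap - even though the
signed sum overflows and must saturate, and folding to a plain ADD would then
miscompile. A minimal sketch of the signed semantics involved, in plain C++;
sadd_sat_i32 is my name, not an LLVM helper:

  #include <cstdint>
  #include <cstdio>

  // Reference semantics of ISD::SADDSAT on i32: compute the exact sum in
  // 64 bits, then clamp to the signed 32-bit range instead of wrapping.
  static int32_t sadd_sat_i32(int32_t A, int32_t B) {
    int64_t Sum = int64_t(A) + int64_t(B);
    if (Sum > INT32_MAX) return INT32_MAX;
    if (Sum < INT32_MIN) return INT32_MIN;
    return int32_t(Sum);
  }

  int main() {
    // Saturates: prints 2147483647. A plain wrapping add of these operands
    // would instead produce INT32_MIN + 99 == -2147483549.
    printf("%d\n", sadd_sat_i32(INT32_MAX, 100));
  }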

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/X86/combine-add-ssat.ll
    llvm/trunk/test/CodeGen/X86/combine-add-usat.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=351057&r1=351056&r2=351057&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Mon Jan 14 04:12:42 2019
@@ -2207,8 +2207,9 @@ SDValue DAGCombiner::visitADDSAT(SDNode
     return N0;
 
   // If it cannot overflow, transform into an add.
-  if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
-    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
+  if (Opcode == ISD::UADDSAT)
+    if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never)
+      return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
 
   return SDValue();
 }
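
The fold stays enabled for uadd_sat because it is sound there: when the
unsigned addition provably cannot wrap, uadd_sat and a plain add agree
bit-for-bit. A standalone sketch of that equivalence (uadd_sat_u32 is my
name, not an LLVM helper):

  #include <cstdint>
  #include <cstdio>

  // Reference semantics of ISD::UADDSAT on i32: a wrapped sum is smaller
  // than either operand, so detect overflow by comparison and clamp.
  static uint32_t uadd_sat_u32(uint32_t A, uint32_t B) {
    uint32_t Sum = A + B;              // unsigned add wraps on overflow
    return Sum < A ? UINT32_MAX : Sum; // wrapped => saturate to all-ones
  }

  int main() {
    // No-wrap case (what OFK_Never asserts): identical to a plain add.
    printf("%u\n", uadd_sat_u32(1000u, 2000u));     // 3000
    // Wrapping case: saturates instead of wrapping around to 99.
    printf("%u\n", uadd_sat_u32(UINT32_MAX, 100u)); // 4294967295
  }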

Modified: llvm/trunk/test/CodeGen/X86/combine-add-ssat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-add-ssat.ll?rev=351057&r1=351056&r2=351057&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-add-ssat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-add-ssat.ll Mon Jan 14 04:12:42 2019
@@ -11,6 +11,39 @@ declare  i32 @llvm.sadd.sat.i32  (i32, i
 declare  i64 @llvm.sadd.sat.i64  (i64, i64)
 declare  <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
 
+; fold (sadd_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    movl $2147483647, %edx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    addl $100, %edx
+; CHECK-NEXT:    setns %cl
+; CHECK-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    addl $100, %eax
+; CHECK-NEXT:    cmovol %ecx, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.sadd.sat.i32(i32 2147483647, i32 100)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; SSE-NEXT:    paddsw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; AVX-NEXT:    vpaddsw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
+
 ; fold (sadd_sat c, x) -> (sadd_sat x, c)
 define i32 @combine_constant_i32(i32 %a0) {
 ; CHECK-LABEL: combine_constant_i32:
@@ -23,8 +56,8 @@ define i32 @combine_constant_i32(i32 %a0
 ; CHECK-NEXT:    incl %edi
 ; CHECK-NEXT:    cmovnol %edi, %eax
 ; CHECK-NEXT:    retq
-  %res = call i32 @llvm.sadd.sat.i32(i32 1, i32 %a0);
-  ret i32 %res;
+  %res = call i32 @llvm.sadd.sat.i32(i32 1, i32 %a0)
+  ret i32 %res
 }
 
 define <8 x i16> @combine_constant_v8i16(<8 x i16> %a0) {
@@ -37,8 +70,8 @@ define <8 x i16> @combine_constant_v8i16
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddsw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0);
-  ret <8 x i16> %res;
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
+  ret <8 x i16> %res
 }
 
 ; fold (sadd_sat x, 0) -> x
@@ -47,7 +80,7 @@ define i32 @combine_zero_i32(i32 %a0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
-  %1 = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 0);
+  %1 = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 0)
   ret i32 %1
 }
 
@@ -55,7 +88,7 @@ define <8 x i16> @combine_zero_v8i16(<8
 ; CHECK-LABEL: combine_zero_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
-  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer);
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
   ret <8 x i16> %1
 }
 
@@ -75,7 +108,7 @@ define i32 @combine_no_overflow_i32(i32
 ; CHECK-NEXT:    retq
   %1 = ashr i32 %a0, 16
   %2 = lshr i32 %a1, 16
-  %3 = call i32 @llvm.sadd.sat.i32(i32 %1, i32 %2);
+  %3 = call i32 @llvm.sadd.sat.i32(i32 %1, i32 %2)
   ret i32 %3
 }
 
@@ -95,6 +128,6 @@ define <8 x i16> @combine_no_overflow_v8
 ; AVX-NEXT:    retq
   %1 = ashr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
-  %3 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2);
+  %3 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
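
For reference, the values a future sadd_sat constant fold should produce for
the two new tests above (my arithmetic, not output checked in by this commit):
the scalar case saturates to 2147483647, and the v8i16 lanes all stay in
range, as this sketch computes:

  #include <cstdint>
  #include <cstdio>

  // Lane-by-lane sadd_sat for the v8i16 test; inputs copied from the IR,
  // with i16 wrapping applied (65535 == -1, -255 == 65281, -65535 == 1).
  static int16_t sadd_sat_i16(int16_t A, int16_t B) {
    int32_t Sum = int32_t(A) + int32_t(B);
    if (Sum > INT16_MAX) return INT16_MAX;
    if (Sum < INT16_MIN) return INT16_MIN;
    return int16_t(Sum);
  }

  int main() {
    int16_t A[8] = {0, 1, 255, -1, -1, -255, 1, 1};
    int16_t B[8] = {1, -1, 1, -1, 1, -1, 1, -1};
    for (int I = 0; I != 8; ++I)  // prints: 1 0 256 65534 0 65280 2 0
      printf("%u ", unsigned(uint16_t(sadd_sat_i16(A[I], B[I]))));
    printf("\n");
  }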

Modified: llvm/trunk/test/CodeGen/X86/combine-add-usat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-add-usat.ll?rev=351057&r1=351056&r2=351057&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-add-usat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-add-usat.ll Mon Jan 14 04:12:42 2019
@@ -11,6 +11,35 @@ declare  i32 @llvm.uadd.sat.i32  (i32, i
 declare  i64 @llvm.uadd.sat.i64  (i64, i64)
 declare  <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
 
+; fold (uadd_sat c1, c2) -> c3
+define i32 @combine_constfold_i32() {
+; CHECK-LABEL: combine_constfold_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $-1, %ecx
+; CHECK-NEXT:    movl $-1, %eax
+; CHECK-NEXT:    addl $100, %eax
+; CHECK-NEXT:    cmovbl %ecx, %eax
+; CHECK-NEXT:    retq
+  %res = call i32 @llvm.uadd.sat.i32(i32 4294967295, i32 100)
+  ret i32 %res
+}
+
+define <8 x i16> @combine_constfold_v8i16() {
+; SSE-LABEL: combine_constfold_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_constfold_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+  ret <8 x i16> %res
+}
+
 ; fold (uadd_sat c, x) -> (uadd_sat x, c)
 define i32 @combine_constant_i32(i32 %a0) {
 ; CHECK-LABEL: combine_constant_i32:
@@ -19,7 +48,7 @@ define i32 @combine_constant_i32(i32 %a0
 ; CHECK-NEXT:    movl $-1, %eax
 ; CHECK-NEXT:    cmovael %edi, %eax
 ; CHECK-NEXT:    retq
-  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0);
+  %1 = call i32 @llvm.uadd.sat.i32(i32 1, i32 %a0)
   ret i32 %1
 }
 
@@ -33,7 +62,7 @@ define <8 x i16> @combine_constant_v8i16
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
-  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0);
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %a0)
   ret <8 x i16> %1
 }
 
@@ -43,7 +72,7 @@ define i32 @combine_zero_i32(i32 %a0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    retq
-  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0);
+  %1 = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 0)
   ret i32 %1
 }
 
@@ -51,7 +80,7 @@ define <8 x i16> @combine_zero_v8i16(<8
 ; CHECK-LABEL: combine_zero_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    retq
-  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer);
+  %1 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %a0, <8 x i16> zeroinitializer)
   ret <8 x i16> %1
 }
 
@@ -67,7 +96,7 @@ define i32 @combine_no_overflow_i32(i32
 ; CHECK-NEXT:    retq
   %1 = lshr i32 %a0, 16
   %2 = lshr i32 %a1, 16
-  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2);
+  %3 = call i32 @llvm.uadd.sat.i32(i32 %1, i32 %2)
   ret i32 %3
 }
 
@@ -87,6 +116,6 @@ define <8 x i16> @combine_no_overflow_v8
 ; AVX-NEXT:    retq
   %1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
   %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
-  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2);
+  %3 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
   ret <8 x i16> %3
 }
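
Likewise for the unsigned tests (again my arithmetic, for reference only):
uadd_sat(4294967295, 100) saturates to 4294967295, and the v8i16 lanes, taken
as unsigned bit patterns, fold as computed below:

  #include <cstdint>
  #include <cstdio>

  // Lane-by-lane uadd_sat for the v8i16 test; inputs are the unsigned
  // 16-bit encodings of the IR constants.
  static uint16_t uadd_sat_u16(uint16_t A, uint16_t B) {
    uint32_t Sum = uint32_t(A) + uint32_t(B);
    return Sum > UINT16_MAX ? UINT16_MAX : uint16_t(Sum);
  }

  int main() {
    uint16_t A[8] = {0, 1, 255, 65535, 65535, 65281, 1, 1};
    uint16_t B[8] = {1, 65535, 1, 65535, 1, 65535, 1, 65535};
    for (int I = 0; I != 8; ++I)  // prints: 1 65535 256 65535 65535 65535 2 65535
      printf("%u ", unsigned(uadd_sat_u16(A[I], B[I])));
    printf("\n");
  }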



