[llvm] r336774 - [DAGCombiner] Support non-uniform X%C -> X-(X/C)*C folds

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 11 02:22:42 PDT 2018


Author: rksimon
Date: Wed Jul 11 02:22:42 2018
New Revision: 336774

URL: http://llvm.org/viewvc/llvm-project?rev=336774&view=rev
Log:
[DAGCombiner] Support non-uniform X%C -> X-(X/C)*C folds

First stage of PR38057 - support non-uniform constant vectors in the combine so it can reuse the division-by-constant logic.

We can definitely do better for srem pow2 remainders (and avoid that extra multiply...), but this at least helps keep everything on the vector unit.

Differential Revision: https://reviews.llvm.org/D48975
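
For reference, the combine reuses the standard identity X % C == X - (X / C) * C, now checked and applied per lane so each lane can carry its own constant divisor. A minimal standalone C++ sketch of the lanewise behaviour, using the <1, 2, 4, 8> divisors from the test below (illustrative only, not the DAGCombiner code itself):

    #include <array>
    #include <cassert>

    // Lanewise srem via the X - (X / C) * C identity with a non-uniform
    // (per-lane) constant divisor, mirroring <i32 1, i32 2, i32 4, i32 8>.
    static std::array<int, 4> sremByConst(std::array<int, 4> X) {
      constexpr std::array<int, 4> C = {1, 2, 4, 8}; // every lane must be non-zero
      std::array<int, 4> R{};
      for (int I = 0; I != 4; ++I)
        R[I] = X[I] - (X[I] / C[I]) * C[I]; // same result as X[I] % C[I]
      return R;
    }

    int main() {
      std::array<int, 4> R = sremByConst({7, 7, 7, 7});
      assert(R[0] == 0 && R[1] == 1 && R[2] == 3 && R[3] == 7);
      return 0;
    }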

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/X86/combine-srem.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=336774&r1=336773&r2=336774&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Wed Jul 11 02:22:42 2018
@@ -3297,7 +3297,10 @@ SDValue DAGCombiner::visitREM(SDNode *N)
   // by skipping the simplification if isIntDivCheap().  When div is not cheap,
   // combine will not return a DIVREM.  Regardless, checking cheapness here
   // makes sense since the simplification results in fatter code.
-  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap(VT, Attr)) {
+  // TODO: replace matchUnaryPredicate with SelectionDAG::isKnownNeverZero(N1).
+  if (ISD::matchUnaryPredicate(
+          N1, [](ConstantSDNode *C) { return !C->isNullValue(); }) &&
+      !TLI.isIntDivCheap(VT, Attr)) {
     SDValue OptimizedDiv =
         isSigned ? visitSDIVLike(N0, N1, N) : visitUDIVLike(N0, N1, N);
     if (OptimizedDiv.getNode() && OptimizedDiv.getOpcode() != ISD::UDIVREM &&

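The functional change in the guard above: the divisor no longer has to be a single (splat) constant - ISD::matchUnaryPredicate runs the non-zero check on every constant lane, so a build_vector of distinct divisors now also takes this path. Conceptually it is an all-lanes predicate test (a standalone sketch for illustration, not the SelectionDAG implementation):

    #include <functional>
    #include <vector>

    // Conceptual all-lanes match: succeed only if every lane constant
    // satisfies the predicate, e.g. "is non-zero" for the rem divisor vector.
    static bool matchEveryLane(const std::vector<int> &Lanes,
                               const std::function<bool(int)> &Pred) {
      for (int C : Lanes)
        if (!Pred(C))
          return false;
      return !Lanes.empty();
    }

    // matchEveryLane({1, 2, 4, 8}, [](int C) { return C != 0; }) == true
    // matchEveryLane({1, 0, 4, 8}, [](int C) { return C != 0; }) == false
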
Modified: llvm/trunk/test/CodeGen/X86/combine-srem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-srem.ll?rev=336774&r1=336773&r2=336774&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-srem.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-srem.ll Wed Jul 11 02:22:42 2018
@@ -274,61 +274,64 @@ define <4 x i32> @combine_vec_srem_by_po
 define <4 x i32> @combine_vec_srem_by_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_srem_by_pow2b:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    extractps $3, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    sarl $31, %ecx
-; SSE-NEXT:    shrl $29, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    andl $-8, %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    movd %eax, %xmm1
-; SSE-NEXT:    extractps $2, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    sarl $31, %ecx
-; SSE-NEXT:    shrl $30, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    andl $-4, %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    movd %eax, %xmm2
-; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE-NEXT:    extractps $1, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    andl $-2, %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    movd %eax, %xmm0
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,1]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrld $31, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrad $31, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm3
+; SSE-NEXT:    psrld $29, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; SSE-NEXT:    psrld $30, %xmm2
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT:    paddd %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    psrad $2, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; SSE-NEXT:    psrad $3, %xmm1
+; SSE-NEXT:    psrad $1, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2,3,4,5,6,7]
+; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm3
+; SSE-NEXT:    psubd %xmm3, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: combine_vec_srem_by_pow2b:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractps $3, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    sarl $31, %ecx
-; AVX-NEXT:    shrl $29, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    andl $-8, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vmovd %eax, %xmm1
-; AVX-NEXT:    vextractps $2, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    sarl $31, %ecx
-; AVX-NEXT:    shrl $30, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    andl $-4, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vmovd %eax, %xmm2
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX-NEXT:    vextractps $1, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    shrl $31, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    andl $-2, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vmovd %eax, %xmm0
-; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,1]
-; AVX-NEXT:    retq
+; AVX1-LABEL: combine_vec_srem_by_pow2b:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
+; AVX1-NEXT:    vpsrld $29, %xmm2, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpsrld $30, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrad $3, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrad $1, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrad $2, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_vec_srem_by_pow2b:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3]
+; AVX2-NEXT:    vpsravd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; AVX2-NEXT:    vpsllvd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %1 = srem <4 x i32> %x, <i32 1, i32 2, i32 4, i32 8>
   ret <4 x i32> %1
 }
@@ -336,98 +339,70 @@ define <4 x i32> @combine_vec_srem_by_po
 define <4 x i32> @combine_vec_srem_by_pow2b_neg(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_srem_by_pow2b_neg:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pextrd $1, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    sarl $31, %ecx
-; SSE-NEXT:    shrl $30, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    shrl $2, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    shll $2, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    movd %xmm0, %ecx
-; SSE-NEXT:    movl %ecx, %edx
-; SSE-NEXT:    shrl $31, %edx
-; SSE-NEXT:    addl %ecx, %edx
-; SSE-NEXT:    shrl %edx
-; SSE-NEXT:    negl %edx
-; SSE-NEXT:    addl %edx, %edx
-; SSE-NEXT:    negl %edx
-; SSE-NEXT:    subl %edx, %ecx
-; SSE-NEXT:    movd %ecx, %xmm1
-; SSE-NEXT:    pinsrd $1, %eax, %xmm1
-; SSE-NEXT:    pextrd $2, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    sarl $31, %ecx
-; SSE-NEXT:    shrl $29, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    shrl $3, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    shll $3, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    pinsrd $2, %eax, %xmm1
-; SSE-NEXT:    pextrd $3, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    sarl $31, %ecx
-; SSE-NEXT:    shrl $28, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    shrl $4, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    shll $4, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    pinsrd $3, %eax, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psrld $28, %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    psrld $30, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrld $31, %xmm2
+; SSE-NEXT:    psrld $29, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT:    paddd %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psrad $4, %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    psrad $2, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    psrad $3, %xmm2
+; SSE-NEXT:    psrad $1, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    psubd %xmm1, %xmm2
+; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm2
+; SSE-NEXT:    psubd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: combine_vec_srem_by_pow2b_neg:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    sarl $31, %ecx
-; AVX-NEXT:    shrl $30, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    shrl $2, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    shll $2, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vmovd %xmm0, %ecx
-; AVX-NEXT:    movl %ecx, %edx
-; AVX-NEXT:    shrl $31, %edx
-; AVX-NEXT:    addl %ecx, %edx
-; AVX-NEXT:    shrl %edx
-; AVX-NEXT:    negl %edx
-; AVX-NEXT:    addl %edx, %edx
-; AVX-NEXT:    negl %edx
-; AVX-NEXT:    subl %edx, %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm1
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    sarl $31, %ecx
-; AVX-NEXT:    shrl $29, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    shrl $3, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    shll $3, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    sarl $31, %ecx
-; AVX-NEXT:    shrl $28, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    shrl $4, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    shll $4, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: combine_vec_srem_by_pow2b_neg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrld $28, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrld $30, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm3
+; AVX1-NEXT:    vpsrld $29, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrad $4, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrad $2, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpsrad $3, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_vec_srem_by_pow2b_neg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpsubd %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
   %1 = srem <4 x i32> %x, <i32 -2, i32 -4, i32 -8, i32 -16>
   ret <4 x i32> %1
 }
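
As the log notes, the pow2 srem lanes could avoid the multiply entirely: the scalarized code removed from combine_vec_srem_by_pow2b computed each remainder by biasing the dividend with the shifted sign bit and then masking (the andl $-8 / andl $-4 / andl $-2 sequences). A minimal scalar sketch of that multiply-free alternative (illustrative only; vectorizing it is the follow-up work referenced for PR38057):

    #include <cassert>
    #include <cstdint>

    // Branchless signed remainder by a power of two C = 1 << K: add a bias of
    // C-1 only for negative dividends so the mask rounds toward zero, then
    // subtract the masked value, i.e. X - ((X + Bias) & -C).
    static int32_t sremPow2(int32_t X, unsigned K) {
      assert(K >= 1 && K <= 30 && "divisor must be a small power of two");
      int32_t C = 1 << K;
      int32_t Bias = (int32_t)((uint32_t)(X >> 31) >> (32 - K)); // 0 or C-1
      return X - ((X + Bias) & -C); // same result as X % C
    }

    // sremPow2(7, 3) == 7, sremPow2(-7, 3) == -7, sremPow2(-9, 3) == -1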



