[llvm] r336048 - [DAGCombiner] Correctly handle a non-splat power-of-2 divisor with -1 elements (PR37119)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 30 05:22:55 PDT 2018


Author: rksimon
Date: Sat Jun 30 05:22:55 2018
New Revision: 336048

URL: http://llvm.org/viewvc/llvm-project?rev=336048&view=rev
Log:
[DAGCombiner] Correctly handle a non-splat power-of-2 divisor with -1 elements (PR37119)

The combine added in commit r329525 overlooked the case where one, but not all, of the divisor elements is -1; -1 is the only power-of-2 value for which the sdiv expansion recipe breaks.
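
For readers unfamiliar with the recipe: when |d| == 1, the expansion's rounding bias is shifted right by BitWidth - log2(|d|) == BitWidth, an out-of-range shift, so those lanes have to be selected around, and doing that before the negation step lets sdiv X, -1 fall out as 0 - X. Below is a minimal scalar model of the fixed per-lane expansion, assuming 32-bit lanes and a hypothetical helper name (hand-written C++, not the DAG code itself):

  #include <cstdint>

  // Scalar sketch of the per-lane expansion for d == +/- 2^k
  // (d != INT32_MIN, matching the isMinSignedValue() bail-out).
  int32_t expand_sdiv_pow2(int32_t x, int32_t d) {
    uint32_t absd = d < 0 ? 0u - static_cast<uint32_t>(d)
                          : static_cast<uint32_t>(d);
    int k = __builtin_ctz(absd); // log2(|d|); absd is a power of two
    int32_t res;
    if (d == 1 || d == -1) {
      // The special case this patch hoists above the negation select:
      // the recipe below would shift by 32 - k == 32 here, which is
      // out of range, so take X directly (negated below for d == -1).
      res = x;
    } else {
      // Add 2^k - 1 to negative dividends so the arithmetic shift
      // rounds toward zero, as sdiv requires.
      uint32_t sign = static_cast<uint32_t>(x >> 31); // all-ones if x < 0
      res = (x + static_cast<int32_t>(sign >> (32 - k))) >> k;
    }
    return d < 0 ? -res : res; // negate the result for negative divisors
  }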

Thanks to @zvi for the original patch.

Differential Revision: https://reviews.llvm.org/D45806

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/X86/combine-sdiv.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=336048&r1=336047&r2=336048&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Sat Jun 30 05:22:55 2018
@@ -3051,11 +3051,8 @@ SDValue DAGCombiner::visitSDIV(SDNode *N
   auto IsPowerOfTwo = [](ConstantSDNode *C) {
     if (C->isNullValue() || C->isOpaque())
       return false;
-    if (C->getAPIntValue().isAllOnesValue())
-      return false;
     if (C->getAPIntValue().isMinSignedValue())
       return false;
-
     if (C->getAPIntValue().isPowerOf2())
       return true;
     if ((-C->getAPIntValue()).isPowerOf2())
@@ -3095,6 +3092,15 @@ SDValue DAGCombiner::visitSDIV(SDNode *N
     SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Add, C1);
     AddToWorklist(Sra.getNode());
 
+    // Special case: (sdiv X, 1) -> X
+    // Special Case: (sdiv X, -1) -> 0-X
+    SDValue One = DAG.getConstant(1, DL, VT);
+    SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
+    SDValue IsOne = DAG.getSetCC(DL, CCVT, N1, One, ISD::SETEQ);
+    SDValue IsAllOnes = DAG.getSetCC(DL, CCVT, N1, AllOnes, ISD::SETEQ);
+    SDValue IsOneOrAllOnes = DAG.getNode(ISD::OR, DL, CCVT, IsOne, IsAllOnes);
+    Sra = DAG.getSelect(DL, VT, IsOneOrAllOnes, N0, Sra);
+
     // If dividing by a positive value, we're done. Otherwise, the result must
     // be negated.
     SDValue Zero = DAG.getConstant(0, DL, VT);
@@ -3103,10 +3109,6 @@ SDValue DAGCombiner::visitSDIV(SDNode *N
     // FIXME: Use SELECT_CC once we improve SELECT_CC constant-folding.
     SDValue IsNeg = DAG.getSetCC(DL, CCVT, N1, Zero, ISD::SETLT);
     SDValue Res = DAG.getSelect(DL, VT, IsNeg, Sub, Sra);
-    // Special case: (sdiv X, 1) -> X
-    SDValue One = DAG.getConstant(1, DL, VT);
-    SDValue IsOne = DAG.getSetCC(DL, CCVT, N1, One, ISD::SETEQ);
-    Res = DAG.getSelect(DL, VT, IsOne, N0, Res);
     return Res;
   }
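
The ordering is the substance of the fix: previously the (sdiv X, 1) -> X select was applied after the IsNeg select, so there was no point at which a -1 lane could still pick up the negation. With the combined 1/-1 select hoisted above it, the existing negation path produces 0-X for the -1 lanes for free. As a quick sanity check of the scalar sketch from the log message against the divisors exercised by combine_vec_sdiv_by_pow2b_PosAndNeg below, namely 1, -4, 8, -16 (hypothetical test code, not part of the patch):

  #include <cassert>
  #include <cstdint>

  int32_t expand_sdiv_pow2(int32_t x, int32_t d); // sketch shown earlier

  int main() {
    // The <4 x i32> divisors from combine_vec_sdiv_by_pow2b_PosAndNeg.
    const int32_t divs[4] = {1, -4, 8, -16};
    for (int32_t x : {-100, -1, 0, 1, 99})
      for (int32_t d : divs)
        assert(expand_sdiv_pow2(x, d) == x / d); // C++ '/' truncates like sdiv
    return 0;
  }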
 

Modified: llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/combine-sdiv.ll?rev=336048&r1=336047&r2=336048&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sdiv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/combine-sdiv.ll Sat Jun 30 05:22:55 2018
@@ -1900,9 +1900,9 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; SSE-NEXT:    psrad $2, %xmm3
 ; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; SSE-NEXT:    psubd %xmm1, %xmm4
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5],xmm4[6,7]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; SSE-NEXT:    psubd %xmm1, %xmm4
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
@@ -1923,9 +1923,9 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; AVX1-NEXT:    vpsrad $3, %xmm1, %xmm4
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT:    vpsubd %xmm1, %xmm3, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT:    vpsubd %xmm0, %xmm3, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2ORLATER-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
@@ -1934,10 +1934,10 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; AVX2ORLATER-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
 ; AVX2ORLATER-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; AVX2ORLATER-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; AVX2ORLATER-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2ORLATER-NEXT:    vpsubd %xmm1, %xmm2, %xmm2
-; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3]
 ; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX2ORLATER-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; AVX2ORLATER-NEXT:    retq
 ;
 ; XOP-LABEL: combine_vec_sdiv_by_pow2b_PosAndNeg:
@@ -1946,10 +1946,10 @@ define <4 x i32> @combine_vec_sdiv_by_po
 ; XOP-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm1
 ; XOP-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; XOP-NEXT:    vpshad {{.*}}(%rip), %xmm1, %xmm1
-; XOP-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT:    vpsubd %xmm1, %xmm2, %xmm2
-; XOP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
 ; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; XOP-NEXT:    retq
   %1 = sdiv <4 x i32> %x, <i32 1, i32 -4, i32 8, i32 -16>
   ret <4 x i32> %1
@@ -1983,245 +1983,52 @@ define <4 x i32> @combine_vec_sdiv_by_po
 define <16 x i8> @non_splat_minus_one_divisor_0(<16 x i8> %A) {
 ; SSE-LABEL: non_splat_minus_one_divisor_0:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pextrb $1, %xmm0, %eax
-; SSE-NEXT:    negb %al
-; SSE-NEXT:    movzbl %al, %eax
-; SSE-NEXT:    pextrb $0, %xmm0, %ecx
-; SSE-NEXT:    negb %cl
-; SSE-NEXT:    movzbl %cl, %ecx
-; SSE-NEXT:    movd %ecx, %xmm1
-; SSE-NEXT:    pinsrb $1, %eax, %xmm1
-; SSE-NEXT:    pextrb $2, %xmm0, %eax
-; SSE-NEXT:    pinsrb $2, %eax, %xmm1
-; SSE-NEXT:    pextrb $3, %xmm0, %eax
-; SSE-NEXT:    negb %al
-; SSE-NEXT:    movzbl %al, %eax
-; SSE-NEXT:    pinsrb $3, %eax, %xmm1
-; SSE-NEXT:    pextrb $4, %xmm0, %eax
-; SSE-NEXT:    negb %al
-; SSE-NEXT:    movzbl %al, %eax
-; SSE-NEXT:    pinsrb $4, %eax, %xmm1
-; SSE-NEXT:    pextrb $5, %xmm0, %eax
-; SSE-NEXT:    negb %al
-; SSE-NEXT:    movzbl %al, %eax
-; SSE-NEXT:    pinsrb $5, %eax, %xmm1
-; SSE-NEXT:    pextrb $6, %xmm0, %eax
-; SSE-NEXT:    pinsrb $6, %eax, %xmm1
-; SSE-NEXT:    pextrb $7, %xmm0, %eax
-; SSE-NEXT:    negb %al
-; SSE-NEXT:    movzbl %al, %eax
-; SSE-NEXT:    pinsrb $7, %eax, %xmm1
-; SSE-NEXT:    pextrb $8, %xmm0, %eax
-; SSE-NEXT:    negb %al
-; SSE-NEXT:    movzbl %al, %eax
-; SSE-NEXT:    pinsrb $8, %eax, %xmm1
-; SSE-NEXT:    pextrb $9, %xmm0, %eax
-; SSE-NEXT:    pinsrb $9, %eax, %xmm1
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    psubb %xmm0, %xmm2
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT:    pblendvb %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: non_splat_minus_one_divisor_0:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX1-NEXT:    negb %al
-; AVX1-NEXT:    movzbl %al, %eax
-; AVX1-NEXT:    vpextrb $0, %xmm0, %ecx
-; AVX1-NEXT:    negb %cl
-; AVX1-NEXT:    movzbl %cl, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm1
-; AVX1-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX1-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX1-NEXT:    negb %al
-; AVX1-NEXT:    movzbl %al, %eax
-; AVX1-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX1-NEXT:    negb %al
-; AVX1-NEXT:    movzbl %al, %eax
-; AVX1-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX1-NEXT:    negb %al
-; AVX1-NEXT:    movzbl %al, %eax
-; AVX1-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX1-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX1-NEXT:    negb %al
-; AVX1-NEXT:    movzbl %al, %eax
-; AVX1-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX1-NEXT:    negb %al
-; AVX1-NEXT:    movzbl %al, %eax
-; AVX1-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX1-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: non_splat_minus_one_divisor_0:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX2-NEXT:    negb %al
-; AVX2-NEXT:    movzbl %al, %eax
-; AVX2-NEXT:    vpextrb $0, %xmm0, %ecx
-; AVX2-NEXT:    negb %cl
-; AVX2-NEXT:    movzbl %cl, %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm1
-; AVX2-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX2-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX2-NEXT:    negb %al
-; AVX2-NEXT:    movzbl %al, %eax
-; AVX2-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX2-NEXT:    negb %al
-; AVX2-NEXT:    movzbl %al, %eax
-; AVX2-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX2-NEXT:    negb %al
-; AVX2-NEXT:    movzbl %al, %eax
-; AVX2-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX2-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX2-NEXT:    negb %al
-; AVX2-NEXT:    movzbl %al, %eax
-; AVX2-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX2-NEXT:    negb %al
-; AVX2-NEXT:    movzbl %al, %eax
-; AVX2-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX2-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: non_splat_minus_one_divisor_0:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX512F-NEXT:    negb %al
-; AVX512F-NEXT:    movzbl %al, %eax
-; AVX512F-NEXT:    vpextrb $0, %xmm0, %ecx
-; AVX512F-NEXT:    negb %cl
-; AVX512F-NEXT:    movzbl %cl, %ecx
-; AVX512F-NEXT:    vmovd %ecx, %xmm1
-; AVX512F-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX512F-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX512F-NEXT:    negb %al
-; AVX512F-NEXT:    movzbl %al, %eax
-; AVX512F-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX512F-NEXT:    negb %al
-; AVX512F-NEXT:    movzbl %al, %eax
-; AVX512F-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX512F-NEXT:    negb %al
-; AVX512F-NEXT:    movzbl %al, %eax
-; AVX512F-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX512F-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX512F-NEXT:    negb %al
-; AVX512F-NEXT:    movzbl %al, %eax
-; AVX512F-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX512F-NEXT:    negb %al
-; AVX512F-NEXT:    movzbl %al, %eax
-; AVX512F-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX512F-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: non_splat_minus_one_divisor_0:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX512BW-NEXT:    negb %al
-; AVX512BW-NEXT:    movzbl %al, %eax
-; AVX512BW-NEXT:    vpextrb $0, %xmm0, %ecx
-; AVX512BW-NEXT:    negb %cl
-; AVX512BW-NEXT:    movzbl %cl, %ecx
-; AVX512BW-NEXT:    vmovd %ecx, %xmm1
-; AVX512BW-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX512BW-NEXT:    negb %al
-; AVX512BW-NEXT:    movzbl %al, %eax
-; AVX512BW-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX512BW-NEXT:    negb %al
-; AVX512BW-NEXT:    movzbl %al, %eax
-; AVX512BW-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX512BW-NEXT:    negb %al
-; AVX512BW-NEXT:    movzbl %al, %eax
-; AVX512BW-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX512BW-NEXT:    negb %al
-; AVX512BW-NEXT:    movzbl %al, %eax
-; AVX512BW-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX512BW-NEXT:    negb %al
-; AVX512BW-NEXT:    movzbl %al, %eax
-; AVX512BW-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $10, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $11, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $12, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $13, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $14, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpextrb $15, %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT:    movw $443, %ax # imm = 0x1BB
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpsubb %xmm0, %xmm1, %xmm0 {%k1}
 ; AVX512BW-NEXT:    retq
 ;
 ; XOP-LABEL: non_splat_minus_one_divisor_0:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vpextrb $1, %xmm0, %eax
-; XOP-NEXT:    negb %al
-; XOP-NEXT:    movzbl %al, %eax
-; XOP-NEXT:    vpextrb $0, %xmm0, %ecx
-; XOP-NEXT:    negb %cl
-; XOP-NEXT:    movzbl %cl, %ecx
-; XOP-NEXT:    vmovd %ecx, %xmm1
-; XOP-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpextrb $2, %xmm0, %eax
-; XOP-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpextrb $3, %xmm0, %eax
-; XOP-NEXT:    negb %al
-; XOP-NEXT:    movzbl %al, %eax
-; XOP-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpextrb $4, %xmm0, %eax
-; XOP-NEXT:    negb %al
-; XOP-NEXT:    movzbl %al, %eax
-; XOP-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpextrb $5, %xmm0, %eax
-; XOP-NEXT:    negb %al
-; XOP-NEXT:    movzbl %al, %eax
-; XOP-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpextrb $6, %xmm0, %eax
-; XOP-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpextrb $7, %xmm0, %eax
-; XOP-NEXT:    negb %al
-; XOP-NEXT:    movzbl %al, %eax
-; XOP-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpextrb $8, %xmm0, %eax
-; XOP-NEXT:    negb %al
-; XOP-NEXT:    movzbl %al, %eax
-; XOP-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; XOP-NEXT:    vpperm {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6,7,8],xmm0[9,10,11,12,13,14,15]
+; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; XOP-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
 ; XOP-NEXT:    retq
   %div = sdiv <16 x i8> %A, <i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ret <16 x i8> %div
@@ -2417,48 +2224,53 @@ define <16 x i8> @non_splat_minus_one_di
 define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) {
 ; SSE-LABEL: non_splat_minus_one_divisor_2:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pextrd $1, %xmm0, %eax
-; SSE-NEXT:    movd %xmm0, %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    movd %ecx, %xmm1
-; SSE-NEXT:    pinsrd $1, %eax, %xmm1
-; SSE-NEXT:    pextrd $2, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    sarl %ecx
-; SSE-NEXT:    pinsrd $2, %ecx, %xmm1
-; SSE-NEXT:    pextrd $3, %xmm0, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    sarl %ecx
-; SSE-NEXT:    negl %ecx
-; SSE-NEXT:    pinsrd $3, %ecx, %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrld $31, %xmm1
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    paddd %xmm0, %xmm1
+; SSE-NEXT:    psrad $1, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    psubd %xmm1, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7]
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: non_splat_minus_one_divisor_2:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX-NEXT:    vmovd %xmm0, %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    vmovd %ecx, %xmm1
-; AVX-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    shrl $31, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    sarl %ecx
-; AVX-NEXT:    vpinsrd $2, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    shrl $31, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    sarl %ecx
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    vpinsrd $3, %ecx, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: non_splat_minus_one_divisor_2:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpsubd %xmm0, %xmm2, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; AVX1-NEXT:    retq
+;
+; AVX2ORLATER-LABEL: non_splat_minus_one_divisor_2:
+; AVX2ORLATER:       # %bb.0:
+; AVX2ORLATER-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX2ORLATER-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2ORLATER-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2ORLATER-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
+; AVX2ORLATER-NEXT:    retq
+;
+; XOP-LABEL: non_splat_minus_one_divisor_2:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrad $31, %xmm0, %xmm1
+; XOP-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpshad {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; XOP-NEXT:    retq
   %div = sdiv <4 x i32> %A, <i32 -1, i32 1, i32 2, i32 -2>
   ret <4 x i32> %div
 }
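
Worth noting how far the non_splat_minus_one_divisor_0 case above collapses: with every divisor being +/-1, the whole expansion folds to one vector negation plus a blend (pxor/psubb/pblendvb, or a single masked vpsubb on AVX512BW). A scalar model of that lowering (a sketch, not compiler output):

  #include <array>
  #include <cstdint>

  std::array<int8_t, 16> div_by_plus_minus_one(const std::array<int8_t, 16> &a) {
    // Lanes whose divisor is -1 in the test (indices 0,1,3,4,5,7,8);
    // this is the complement of the [0,0,255,...] pblendvb mask above.
    constexpr std::array<bool, 16> negate = {
        true, true, false, true,  true,  true,  false, true,
        true, false, false, false, false, false, false, false};
    std::array<int8_t, 16> r{};
    for (int i = 0; i < 16; ++i) // psubb then pblendvb, lane by lane
      r[i] = negate[i] ? static_cast<int8_t>(-a[i]) : a[i];
    return r;
  }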



