[llvm] r264517 - [X86][AVX] Enabled SMUL_LOHI/UMUL_LOHI v8i32 vectors on AVX1 targets

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 26 11:32:14 PDT 2016


Author: rksimon
Date: Sat Mar 26 13:32:13 2016
New Revision: 264517

URL: http://llvm.org/viewvc/llvm-project?rev=264517&view=rev
Log:
[X86][AVX] Enabled SMUL_LOHI/UMUL_LOHI v8i32 vectors on AVX1 targets

Correctly split v8i32 vectors into two v4i32 vectors to prevent scalarization
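
For context: division/remainder of a <8 x i32> vector by a constant is lowered via a
multiply-high (SMUL_LOHI/UMUL_LOHI) node, so enabling the v8i32 variants is what lets the
div7/rem7 tests below vectorize on AVX1. A minimal IR sketch of the affected pattern,
mirroring the test_div7_8i32 case in the test files (the checked-in IR may differ in detail):

    define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
      %res = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
      ret <8 x i32> %res
    }

The udiv-256 test uses udiv in place of sdiv; previously both scalarized on AVX1-only targets.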

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll
    llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=264517&r1=264516&r2=264517&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Mar 26 13:32:13 2016
@@ -1253,6 +1253,9 @@ X86TargetLowering::X86TargetLowering(con
       setOperationAction(ISD::MUL,             MVT::v16i16, Custom);
       setOperationAction(ISD::MUL,             MVT::v32i8, Custom);
 
+      setOperationAction(ISD::UMUL_LOHI,       MVT::v8i32, Custom);
+      setOperationAction(ISD::SMUL_LOHI,       MVT::v8i32, Custom);
+
       setOperationAction(ISD::MULHU,           MVT::v16i16, Custom);
       setOperationAction(ISD::MULHS,           MVT::v16i16, Custom);
       setOperationAction(ISD::MULHU,           MVT::v32i8, Custom);
@@ -19219,6 +19222,24 @@ static SDValue LowerMUL_LOHI(SDValue Op,
   MVT VT = Op0.getSimpleValueType();
   SDLoc dl(Op);
 
+  // Decompose 256-bit ops into smaller 128-bit ops.
+  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+    unsigned Opcode = Op.getOpcode();
+    unsigned NumElems = VT.getVectorNumElements();
+    MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2);
+    SDValue Lo0 = extract128BitVector(Op0, 0, DAG, dl);
+    SDValue Lo1 = extract128BitVector(Op1, 0, DAG, dl);
+    SDValue Hi0 = extract128BitVector(Op0, NumElems / 2, DAG, dl);
+    SDValue Hi1 = extract128BitVector(Op1, NumElems / 2, DAG, dl);
+    SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Lo0, Lo1);
+    SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Hi0, Hi1);
+    SDValue Ops[] = {
+      DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)),
+      DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1))
+    };
+    return DAG.getMergeValues(Ops, dl);
+  }
+
   assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
          (VT == MVT::v8i32 && Subtarget.hasInt256()));
 

Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll?rev=264517&r1=264516&r2=264517&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-sdiv-256.ll Sat Mar 26 13:32:13 2016
@@ -87,88 +87,30 @@ define <4 x i64> @test_div7_4i64(<4 x i6
 define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
 ; AVX1-LABEL: test_div7_8i32:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    movl %eax, %ecx
-; AVX1-NEXT:    shrl $31, %ecx
-; AVX1-NEXT:    sarl $2, %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpmuldq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrld $31, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrad $2, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrad $2, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_div7_8i32:
@@ -406,112 +348,35 @@ define <4 x i64> @test_rem7_4i64(<4 x i6
 define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
 ; AVX1-LABEL: test_rem7_8i32:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    movl %edx, %esi
-; AVX1-NEXT:    shrl $31, %esi
-; AVX1-NEXT:    sarl $2, %edx
-; AVX1-NEXT:    addl %esi, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %esi
-; AVX1-NEXT:    subl %edx, %esi
-; AVX1-NEXT:    subl %esi, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    movslq %ecx, %rcx
-; AVX1-NEXT:    imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    movl %edx, %esi
-; AVX1-NEXT:    shrl $31, %esi
-; AVX1-NEXT:    sarl $2, %edx
-; AVX1-NEXT:    addl %esi, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %esi
-; AVX1-NEXT:    subl %edx, %esi
-; AVX1-NEXT:    subl %esi, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX1-NEXT:    cltq
-; AVX1-NEXT:    imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    addl %eax, %ecx
-; AVX1-NEXT:    movl %ecx, %edx
-; AVX1-NEXT:    shrl $31, %edx
-; AVX1-NEXT:    sarl $2, %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    leal (,%rcx,8), %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    subl %edx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpmuldq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrld $31, %xmm2, %xmm3
+; AVX1-NEXT:    vpsrad $2, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7]
+; AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuldq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrad $2, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_rem7_8i32:

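The unsigned tests below use the analogous udiv-by-7 recipe with multiplier 0x24924925
(613566757). A per-lane sketch in scalar IR (again with an illustrative name):

    define i32 @udiv7_lane(i32 %n) {
      %n64 = zext i32 %n to i64
      %p   = mul i64 %n64, 613566757        ; n * 0x24924925 (unsigned magic constant)
      %hip = lshr i64 %p, 32
      %t   = trunc i64 %hip to i32          ; high 32 bits of the 64-bit product
      %d   = sub i32 %n, %t
      %dh  = lshr i32 %d, 1                 ; (n - t) >> 1
      %s   = add i32 %t, %dh
      %q   = lshr i32 %s, 2                 ; %q == %n udiv 7
      ret i32 %q
    }

As with the signed case, the rem7 test then computes n - 7*q with vpmulld/vpsubd.
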
Modified: llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll?rev=264517&r1=264516&r2=264517&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-idiv-udiv-256.ll Sat Mar 26 13:32:13 2016
@@ -95,72 +95,30 @@ define <4 x i64> @test_div7_4i64(<4 x i6
 define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
 ; AVX1-LABEL: test_div7_8i32:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    shrl %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    shrl $2, %eax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    shrl %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    shrl $2, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    shrl %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    shrl $2, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    shrl %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    shrl $2, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    shrl %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    shrl $2, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    shrl %ecx
-; AVX1-NEXT:    addl %edx, %ecx
-; AVX1-NEXT:    shrl $2, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    shrl %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    shrl $2, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    shrl %eax
-; AVX1-NEXT:    addl %ecx, %eax
-; AVX1-NEXT:    shrl $2, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsrld $2, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrld $2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_div7_8i32:
@@ -401,104 +359,35 @@ define <4 x i64> @test_rem7_4i64(<4 x i6
 define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
 ; AVX1-LABEL: test_rem7_8i32:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm1, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    movl %eax, %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    shrl %edx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    shrl $2, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %ecx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    vmovd %xmm1, %ecx
-; AVX1-NEXT:    imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    movl %ecx, %esi
-; AVX1-NEXT:    subl %edx, %esi
-; AVX1-NEXT:    shrl %esi
-; AVX1-NEXT:    addl %edx, %esi
-; AVX1-NEXT:    shrl $2, %esi
-; AVX1-NEXT:    leal (,%rsi,8), %edx
-; AVX1-NEXT:    subl %esi, %edx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm1, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    movl %eax, %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    shrl %edx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    shrl $2, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %ecx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    movl %eax, %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    shrl %edx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    shrl $2, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %ecx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    movl %eax, %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    shrl %edx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    shrl $2, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %ecx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rdx
-; AVX1-NEXT:    movl %ecx, %esi
-; AVX1-NEXT:    subl %edx, %esi
-; AVX1-NEXT:    shrl %esi
-; AVX1-NEXT:    addl %edx, %esi
-; AVX1-NEXT:    shrl $2, %esi
-; AVX1-NEXT:    leal (,%rsi,8), %edx
-; AVX1-NEXT:    subl %esi, %edx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm2
-; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    movl %eax, %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    shrl %edx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    shrl $2, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %ecx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
-; AVX1-NEXT:    imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT:    shrq $32, %rcx
-; AVX1-NEXT:    movl %eax, %edx
-; AVX1-NEXT:    subl %ecx, %edx
-; AVX1-NEXT:    shrl %edx
-; AVX1-NEXT:    addl %ecx, %edx
-; AVX1-NEXT:    shrl $2, %edx
-; AVX1-NEXT:    leal (,%rdx,8), %ecx
-; AVX1-NEXT:    subl %edx, %ecx
-; AVX1-NEXT:    subl %ecx, %eax
-; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm3
+; AVX1-NEXT:    vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsrld $2, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,7,7,7]
+; AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm4
+; AVX1-NEXT:    vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddd %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpsrld $2, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: test_rem7_8i32:
