[llvm] r327446 - [DAGCombiner] Allow visitEXTRACT_SUBVECTOR to combine with BUILD_VECTORS between LegalizeVectorOps and LegalizeDAG.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 13 13:36:28 PDT 2018


Author: ctopper
Date: Tue Mar 13 13:36:28 2018
New Revision: 327446

URL: http://llvm.org/viewvc/llvm-project?rev=327446&view=rev
Log:
[DAGCombiner] Allow visitEXTRACT_SUBVECTOR to combine with BUILD_VECTORS between LegalizeVectorOps and LegalizeDAG.

BUILD_VECTORs aren't themselves legalized until LegalizeDAG, so we should still be able to create an "illegal" one before that. This helps us combine with BUILD_VECTORs that are introduced during LegalizeVectorOps due to unrolling.
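
For reference, a sketch of the guarded fold after this change, paraphrased from visitEXTRACT_SUBVECTOR (the operand bookkeeping is abbreviated for illustration; this is not the verbatim upstream code). The functional change is only the first clause of the condition: instead of bailing out whenever LegalOperations is set, the fold now only defers to an explicit legality check once we are past LegalizeDAG.

    // Fold (extract_subvector (build_vector ...), Idx) into a narrower
    // BUILD_VECTOR. Before this patch the guard was `!LegalOperations`,
    // which also blocked the combine between LegalizeVectorOps and
    // LegalizeDAG, even though BUILD_VECTOR is still legalizable there.
    if ((Level < AfterLegalizeDAG ||
         TLI.isOperationLegal(ISD::BUILD_VECTOR, ExtractVT)) &&
        (!LegalTypes || TLI.isTypeLegal(ExtractVT))) {
      // Take the NumElems operands of the BUILD_VECTOR covered by the
      // extract and form the narrower vector directly from them.
      SmallVector<SDValue, 8> Ops(V->op_begin() + IdxVal,
                                  V->op_begin() + IdxVal + NumElems);
      return DAG.getBuildVector(ExtractVT, SDLoc(N), Ops);
    }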

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
    llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
    llvm/trunk/test/CodeGen/X86/shrink_vmul.ll
    llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Tue Mar 13 13:36:28 2018
@@ -15477,7 +15477,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVEC
         unsigned NumElems = ExtractSize / EltSize;
         EVT ExtractVT = EVT::getVectorVT(*DAG.getContext(),
                                          InVT.getVectorElementType(), NumElems);
-        if ((!LegalOperations ||
+        if ((Level < AfterLegalizeDAG ||
              TLI.isOperationLegal(ISD::BUILD_VECTOR, ExtractVT)) &&
             (!LegalTypes || TLI.isTypeLegal(ExtractVT))) {
           unsigned IdxVal = (Idx->getZExtValue() * NVT.getScalarSizeInBits()) /
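
For context on the new guard: Level is the DAGCombiner's CombineLevel, and AfterLegalizeDAG is its final stage, so `Level < AfterLegalizeDAG` admits every combine run up to and including the one after LegalizeVectorOps. The stages, as declared in llvm/include/llvm/CodeGen/DAGCombine.h around this revision, are:

    enum CombineLevel {
      BeforeLegalizeTypes,
      AfterLegalizeTypes,
      AfterLegalizeVectorOps,
      AfterLegalizeDAG
    };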

Modified: llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll Tue Mar 13 13:36:28 2018
@@ -1380,13 +1380,12 @@ define <16 x i32> @f16xi32_i128(<16 x i3
 define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
 ; AVX-LABEL: f4xi64_i128:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,0,1,0,0,0,1,0]
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,1,0]
+; AVX-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f4xi64_i128:
@@ -1424,16 +1423,16 @@ define <4 x i64> @f4xi64_i128(<4 x i64>
 define <8 x i64> @f8xi64_i128(<8 x i64> %a) {
 ; AVX-LABEL: f8xi64_i128:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,0,1,0,0,0,1,0]
-; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX-NEXT:    vpaddq %xmm3, %xmm4, %xmm4
-; AVX-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,0,1,0]
+; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [0,0,1,0,0,0,1,0]
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl
@@ -1499,16 +1498,17 @@ define <8 x i64> @f8xi64_i128(<8 x i64>
 define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
 ; AVX-LABEL: f8xi64_i256:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,0,1,0,2,0,3,0]
-; AVX-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX-NEXT:    vpaddq %xmm3, %xmm4, %xmm4
-; AVX-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [2,0,3,0]
+; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,0,1,0]
+; AVX-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vmovaps {{.*#+}} ymm2 = [0,0,1,0,2,0,3,0]
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl

Modified: llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/known-signbits-vector.ll Tue Mar 13 13:36:28 2018
@@ -28,19 +28,9 @@ define <4 x float> @signbits_sext_v4i64_
 ; X32-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT:    vmovd %eax, %xmm0
-; X32-NEXT:    sarl $31, %eax
-; X32-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
 ; X32-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
-; X32-NEXT:    sarl $31, %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    vmovd %eax, %xmm1
-; X32-NEXT:    sarl $31, %eax
-; X32-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X32-NEXT:    vpinsrd $2, %edx, %xmm1, %xmm1
-; X32-NEXT:    sarl $31, %edx
-; X32-NEXT:    vpinsrd $3, %edx, %xmm1, %xmm1
-; X32-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; X32-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1
 ; X32-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
@@ -391,19 +381,17 @@ define <4 x float> @signbits_ashr_sext_s
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
 ; X32-NEXT:    subl $16, %esp
-; X32-NEXT:    vmovdqa {{.*#+}} ymm3 = [33,0,63,0,33,0,63,0]
-; X32-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; X32-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2147483648,0,2147483648]
-; X32-NEXT:    vpsrlq %xmm4, %xmm5, %xmm6
-; X32-NEXT:    vextractf128 $1, %ymm2, %xmm7
-; X32-NEXT:    vpsrlq %xmm4, %xmm7, %xmm4
-; X32-NEXT:    vpxor %xmm6, %xmm4, %xmm4
-; X32-NEXT:    vpsubq %xmm6, %xmm4, %xmm4
+; X32-NEXT:    vmovdqa {{.*#+}} xmm3 = [33,0,63,0]
+; X32-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
+; X32-NEXT:    vpsrlq %xmm3, %xmm4, %xmm4
+; X32-NEXT:    vextractf128 $1, %ymm2, %xmm5
 ; X32-NEXT:    vpsrlq %xmm3, %xmm5, %xmm5
+; X32-NEXT:    vpxor %xmm4, %xmm5, %xmm5
+; X32-NEXT:    vpsubq %xmm4, %xmm5, %xmm5
 ; X32-NEXT:    vpsrlq %xmm3, %xmm2, %xmm2
-; X32-NEXT:    vpxor %xmm5, %xmm2, %xmm2
-; X32-NEXT:    vpsubq %xmm5, %xmm2, %xmm2
-; X32-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; X32-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; X32-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
+; X32-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
 ; X32-NEXT:    vpmovsxdq 8(%ebp), %xmm3
 ; X32-NEXT:    vpmovsxdq 16(%ebp), %xmm4
 ; X32-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3

Modified: llvm/trunk/test/CodeGen/X86/shrink_vmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/shrink_vmul.ll?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/shrink_vmul.ll (original)
+++ llvm/trunk/test/CodeGen/X86/shrink_vmul.ll Tue Mar 13 13:36:28 2018
@@ -2227,67 +2227,89 @@ define void @PR34947() {
 ;
 ; X86-AVX1-LABEL: PR34947:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    pushl %esi
+; X86-AVX1-NEXT:    pushl %ebp
 ; X86-AVX1-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT:    .cfi_offset %esi, -8
+; X86-AVX1-NEXT:    pushl %ebx
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 12
+; X86-AVX1-NEXT:    pushl %edi
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 16
+; X86-AVX1-NEXT:    pushl %esi
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 20
+; X86-AVX1-NEXT:    subl $16, %esp
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 36
+; X86-AVX1-NEXT:    .cfi_offset %esi, -20
+; X86-AVX1-NEXT:    .cfi_offset %edi, -16
+; X86-AVX1-NEXT:    .cfi_offset %ebx, -12
+; X86-AVX1-NEXT:    .cfi_offset %ebp, -8
 ; X86-AVX1-NEXT:    vmovdqa (%eax), %ymm0
-; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
-; X86-AVX1-NEXT:    divl %ecx
-; X86-AVX1-NEXT:    movl %edx, %ecx
-; X86-AVX1-NEXT:    vmovd %xmm0, %esi
+; X86-AVX1-NEXT:    divl (%eax)
+; X86-AVX1-NEXT:    movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X86-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
-; X86-AVX1-NEXT:    divl %esi
-; X86-AVX1-NEXT:    vmovd %edx, %xmm1
-; X86-AVX1-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; X86-AVX1-NEXT:    divl %ecx
+; X86-AVX1-NEXT:    movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
 ; X86-AVX1-NEXT:    vpextrd $2, %xmm0, %ecx
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
 ; X86-AVX1-NEXT:    divl %ecx
-; X86-AVX1-NEXT:    vpinsrd $2, %edx, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
+; X86-AVX1-NEXT:    movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
 ; X86-AVX1-NEXT:    divl %ecx
-; X86-AVX1-NEXT:    vpinsrd $3, %edx, %xmm1, %xmm1
+; X86-AVX1-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X86-AVX1-NEXT:    vmovd %xmm0, %ecx
+; X86-AVX1-NEXT:    xorl %eax, %eax
+; X86-AVX1-NEXT:    xorl %edx, %edx
+; X86-AVX1-NEXT:    divl %ecx
+; X86-AVX1-NEXT:    movl %edx, %ebp
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
+; X86-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
 ; X86-AVX1-NEXT:    divl %ecx
 ; X86-AVX1-NEXT:    movl %edx, %ecx
-; X86-AVX1-NEXT:    vmovd %xmm0, %esi
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
+; X86-AVX1-NEXT:    vpextrd $2, %xmm0, %esi
 ; X86-AVX1-NEXT:    divl %esi
-; X86-AVX1-NEXT:    vmovd %edx, %xmm2
-; X86-AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vpextrd $2, %xmm0, %ecx
+; X86-AVX1-NEXT:    movl %edx, %esi
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
-; X86-AVX1-NEXT:    divl %ecx
-; X86-AVX1-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
-; X86-AVX1-NEXT:    xorl %eax, %eax
-; X86-AVX1-NEXT:    xorl %edx, %edx
-; X86-AVX1-NEXT:    divl %ecx
-; X86-AVX1-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edi
+; X86-AVX1-NEXT:    divl %edi
+; X86-AVX1-NEXT:    movl %edx, %edi
 ; X86-AVX1-NEXT:    xorl %eax, %eax
 ; X86-AVX1-NEXT:    xorl %edx, %edx
-; X86-AVX1-NEXT:    divl (%eax)
-; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
-; X86-AVX1-NEXT:    vpmaddwd %xmm2, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm1
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vmovd %edx, %xmm1
+; X86-AVX1-NEXT:    vmovd %xmm0, %ebx
+; X86-AVX1-NEXT:    divl %ebx
+; X86-AVX1-NEXT:    vmovd %edx, %xmm0
+; X86-AVX1-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpinsrd $2, %esi, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovd %ebp, %xmm1
+; X86-AVX1-NEXT:    vpinsrd $1, (%esp), %xmm1, %xmm1 # 4-byte Folded Reload
+; X86-AVX1-NEXT:    vpinsrd $2, {{[0-9]+}}(%esp), %xmm1, %xmm1 # 4-byte Folded Reload
+; X86-AVX1-NEXT:    vpinsrd $3, {{[0-9]+}}(%esp), %xmm1, %xmm1 # 4-byte Folded Reload
+; X86-AVX1-NEXT:    vmovd {{[0-9]+}}(%esp), %xmm2 # 4-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm2 = mem[0],zero,zero,zero
 ; X86-AVX1-NEXT:    movl $8199, %eax # imm = 0x2007
-; X86-AVX1-NEXT:    vmovd %eax, %xmm2
-; X86-AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vmovd %eax, %xmm3
+; X86-AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [8199,8199,8199,8199]
+; X86-AVX1-NEXT:    vpmaddwd %xmm4, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpmaddwd %xmm4, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm1
 ; X86-AVX1-NEXT:    vmovd %xmm1, (%eax)
 ; X86-AVX1-NEXT:    vmovaps %ymm0, (%eax)
+; X86-AVX1-NEXT:    addl $16, %esp
 ; X86-AVX1-NEXT:    popl %esi
+; X86-AVX1-NEXT:    popl %edi
+; X86-AVX1-NEXT:    popl %ebx
+; X86-AVX1-NEXT:    popl %ebp
 ; X86-AVX1-NEXT:    vzeroupper
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2399,63 +2421,77 @@ define void @PR34947() {
 ;
 ; X64-AVX1-LABEL: PR34947:
 ; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    pushq %rbp
+; X64-AVX1-NEXT:    .cfi_def_cfa_offset 16
+; X64-AVX1-NEXT:    pushq %rbx
+; X64-AVX1-NEXT:    .cfi_def_cfa_offset 24
+; X64-AVX1-NEXT:    .cfi_offset %rbx, -24
+; X64-AVX1-NEXT:    .cfi_offset %rbp, -16
 ; X64-AVX1-NEXT:    vmovdqa (%rax), %ymm0
-; X64-AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
-; X64-AVX1-NEXT:    divl %ecx
-; X64-AVX1-NEXT:    movl %edx, %ecx
-; X64-AVX1-NEXT:    vmovd %xmm0, %esi
+; X64-AVX1-NEXT:    divl (%rax)
+; X64-AVX1-NEXT:    movl %edx, %r8d
+; X64-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
-; X64-AVX1-NEXT:    divl %esi
-; X64-AVX1-NEXT:    vmovd %edx, %xmm1
-; X64-AVX1-NEXT:    vpinsrd $1, %ecx, %xmm1, %xmm1
+; X64-AVX1-NEXT:    divl %ecx
+; X64-AVX1-NEXT:    movl %edx, %r9d
 ; X64-AVX1-NEXT:    vpextrd $2, %xmm0, %ecx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
 ; X64-AVX1-NEXT:    divl %ecx
-; X64-AVX1-NEXT:    vpinsrd $2, %edx, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
+; X64-AVX1-NEXT:    movl %edx, %r10d
+; X64-AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
 ; X64-AVX1-NEXT:    divl %ecx
-; X64-AVX1-NEXT:    vpinsrd $3, %edx, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; X64-AVX1-NEXT:    vpextrd $1, %xmm0, %ecx
+; X64-AVX1-NEXT:    movl %edx, %r11d
+; X64-AVX1-NEXT:    vmovd %xmm0, %ecx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
 ; X64-AVX1-NEXT:    divl %ecx
-; X64-AVX1-NEXT:    movl %edx, %ecx
-; X64-AVX1-NEXT:    vmovd %xmm0, %esi
+; X64-AVX1-NEXT:    movl %edx, %esi
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
-; X64-AVX1-NEXT:    divl %esi
-; X64-AVX1-NEXT:    vmovd %edx, %xmm2
-; X64-AVX1-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; X64-AVX1-NEXT:    divl %ecx
+; X64-AVX1-NEXT:    movl %edx, %edi
 ; X64-AVX1-NEXT:    vpextrd $2, %xmm0, %ecx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
 ; X64-AVX1-NEXT:    divl %ecx
-; X64-AVX1-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vpextrd $3, %xmm0, %ecx
+; X64-AVX1-NEXT:    movl %edx, %ecx
+; X64-AVX1-NEXT:    vpextrd $1, %xmm0, %ebx
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
-; X64-AVX1-NEXT:    divl %ecx
-; X64-AVX1-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; X64-AVX1-NEXT:    divl %ebx
+; X64-AVX1-NEXT:    movl %edx, %ebx
+; X64-AVX1-NEXT:    vmovd %xmm0, %ebp
 ; X64-AVX1-NEXT:    xorl %eax, %eax
 ; X64-AVX1-NEXT:    xorl %edx, %edx
-; X64-AVX1-NEXT:    divl (%rax)
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [8199,8199,8199,8199]
-; X64-AVX1-NEXT:    vpmaddwd %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT:    divl %ebp
+; X64-AVX1-NEXT:    vmovd %edx, %xmm0
+; X64-AVX1-NEXT:    vpinsrd $1, %ebx, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
+; X64-AVX1-NEXT:    vpmaddwd %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vmovd %esi, %xmm2
+; X64-AVX1-NEXT:    vpinsrd $1, %r11d, %xmm2, %xmm2
+; X64-AVX1-NEXT:    vpinsrd $2, %r10d, %xmm2, %xmm2
+; X64-AVX1-NEXT:    vpinsrd $3, %r9d, %xmm2, %xmm2
+; X64-AVX1-NEXT:    vpmaddwd %xmm1, %xmm2, %xmm1
 ; X64-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vmovd %edx, %xmm1
+; X64-AVX1-NEXT:    vmovd %r8d, %xmm1
 ; X64-AVX1-NEXT:    movl $8199, %eax # imm = 0x2007
 ; X64-AVX1-NEXT:    vmovd %eax, %xmm2
 ; X64-AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vmovd %xmm1, (%rax)
 ; X64-AVX1-NEXT:    vmovaps %ymm0, (%rax)
+; X64-AVX1-NEXT:    popq %rbx
+; X64-AVX1-NEXT:    popq %rbp
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
 ;

Modified: llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll Tue Mar 13 13:36:28 2018
@@ -916,21 +916,22 @@ define void @fallback_broadcast_v4i64_to
 ; X32-AVX1-LABEL: fallback_broadcast_v4i64_to_v8i64:
 ; X32-AVX1:       # %bb.0: # %entry
 ; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT:    vmovdqa {{.*#+}} ymm4 = [1,0,2,0,3,0,4,0]
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm5
-; X32-AVX1-NEXT:    vpaddq %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT:    vpaddq %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [3,0,4,0]
+; X32-AVX1-NEXT:    vpaddq %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [1,0,2,0]
+; X32-AVX1-NEXT:    vpaddq %xmm5, %xmm0, %xmm0
 ; X32-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; X32-AVX1-NEXT:    vpaddq %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; X32-AVX1-NEXT:    vpaddq %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
-; X32-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; X32-AVX1-NEXT:    vandps %ymm4, %ymm1, %ymm1
-; X32-AVX1-NEXT:    vandps %ymm4, %ymm2, %ymm2
+; X32-AVX1-NEXT:    vmovaps {{.*#+}} ymm3 = [1,0,2,0,3,0,4,0]
+; X32-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
+; X32-AVX1-NEXT:    vpaddq %xmm4, %xmm6, %xmm6
+; X32-AVX1-NEXT:    vpaddq %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm2, %ymm2
+; X32-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; X32-AVX1-NEXT:    vpaddq %xmm4, %xmm6, %xmm4
+; X32-AVX1-NEXT:    vpaddq %xmm5, %xmm1, %xmm1
+; X32-AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
+; X32-AVX1-NEXT:    vandps %ymm3, %ymm1, %ymm1
+; X32-AVX1-NEXT:    vandps %ymm3, %ymm2, %ymm2
 ; X32-AVX1-NEXT:    vmovups %ymm0, ga4
 ; X32-AVX1-NEXT:    vmovups %ymm2, gb4+32
 ; X32-AVX1-NEXT:    vmovups %ymm1, gb4

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll Tue Mar 13 13:36:28 2018
@@ -1183,29 +1183,29 @@ define <4 x i64> @constant_shift_v4i64(<
 ;
 ; X32-AVX1-LABEL: constant_shift_v4i64:
 ; X32-AVX1:       # %bb.0:
-; X32-AVX1-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,2147483648,0,2147483648]
-; X32-AVX1-NEXT:    vpsrlq %xmm2, %xmm3, %xmm4
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; X32-AVX1-NEXT:    vpsrlq %xmm5, %xmm3, %xmm6
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; X32-AVX1-NEXT:    vpsrlq %xmm2, %xmm6, %xmm2
-; X32-AVX1-NEXT:    vpsrlq %xmm5, %xmm6, %xmm5
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
-; X32-AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm3, %xmm4
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT:    vpsrlq %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm1
-; X32-AVX1-NEXT:    vpsrlq %xmm5, %xmm0, %xmm0
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; X32-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm0
-; X32-AVX1-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
-; X32-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
+; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm2, %xmm3
+; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; X32-AVX1-NEXT:    vpsrlq %xmm4, %xmm2, %xmm5
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; X32-AVX1-NEXT:    vpsrlq %xmm4, %xmm5, %xmm4
+; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm5, %xmm1
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,7,0]
+; X32-AVX1-NEXT:    vpsrlq %xmm3, %xmm2, %xmm4
+; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
+; X32-AVX1-NEXT:    vpsrlq %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT:    vpsrlq %xmm5, %xmm0, %xmm4
+; X32-AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: constant_shift_v4i64:

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll Tue Mar 13 13:36:28 2018
@@ -930,18 +930,18 @@ define <4 x i64> @constant_shift_v4i64(<
 ;
 ; X32-AVX1-LABEL: constant_shift_v4i64:
 ; X32-AVX1:       # %bb.0:
-; X32-AVX1-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
+; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT:    vpsrlq %xmm2, %xmm3, %xmm4
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; X32-AVX1-NEXT:    vpsrlq %xmm2, %xmm3, %xmm2
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm3
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; X32-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT:    vpsrlq %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,0,7,0]
+; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT:    vpsrlq %xmm3, %xmm0, %xmm3
+; X32-AVX1-NEXT:    vpsrlq %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: constant_shift_v4i64:

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll?rev=327446&r1=327445&r2=327446&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll Tue Mar 13 13:36:28 2018
@@ -857,18 +857,18 @@ define <4 x i64> @constant_shift_v4i64(<
 ;
 ; X32-AVX1-LABEL: constant_shift_v4i64:
 ; X32-AVX1:       # %bb.0:
-; X32-AVX1-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,0,7,0,31,0,62,0]
-; X32-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
+; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
 ; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT:    vpsllq %xmm2, %xmm3, %xmm4
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; X32-AVX1-NEXT:    vpsllq %xmm2, %xmm3, %xmm2
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm3
-; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
-; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; X32-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT:    vpsllq %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,0,7,0]
+; X32-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
+; X32-AVX1-NEXT:    vpsllq %xmm3, %xmm0, %xmm3
+; X32-AVX1-NEXT:    vpsllq %xmm2, %xmm0, %xmm0
+; X32-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-AVX1-NEXT:    retl
 ;
 ; X32-AVX2-LABEL: constant_shift_v4i64:
