[llvm] d6b68d1 - [X86][SSE] combineExtractWithShuffle - support zero-extending to allow extracting from narrow shuffle masks

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 29 06:27:56 PST 2021


Author: Simon Pilgrim
Date: 2021-01-29T14:22:10Z
New Revision: d6b68d134415300cad55220bfb80e4934217ba19

URL: https://github.com/llvm/llvm-project/commit/d6b68d134415300cad55220bfb80e4934217ba19
DIFF: https://github.com/llvm/llvm-project/commit/d6b68d134415300cad55220bfb80e4934217ba19.diff

LOG: [X86][SSE] combineExtractWithShuffle - support zero-extending to allow extracting from narrow shuffle masks

If the shuffle mask can't be widened to match the original extracted element width, see if the upper bits are zeroable, which allows us to extract and zero-extend the smaller extraction.
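
A minimal LLVM IR sketch of the kind of pattern this targets (an illustrative function, not one of the regression tests below; the names @extract_of_zext, %x and %d are made up). The extract's source is a <4 x i8> value zero-extended to <4 x i32>, so the resolved shuffle mask is expressed in byte lanes (narrower than the requested i32 element), but the upper three bytes of the extracted lane are known zero:

  define i32 @extract_of_zext(<4 x i8> %x, i32 %d) {
    %z = zext <4 x i8> %x to <4 x i32>       ; typically lowered via a zero-extending byte shuffle (pmovzxbd)
    %e = extractelement <4 x i32> %z, i32 3  ; i32 extract whose source mask is at byte granularity
    %r = udiv i32 %e, %d                     ; scalar use that keeps the extracted lane live
    ret i32 %r
  }

With the zeroable upper bytes recognized, the combine can in principle emit a byte extract (PEXTRB) plus a scalar zero-extension instead of widening the whole vector first; the concrete before/after codegen for this shape is the PR46586 test in buildvec-insertvec.ll below (pmovzxbd + pextrd becoming movd + pextrb). The exact output still depends on how earlier combines legalize the zero-extend.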

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/avg.ll
    llvm/test/CodeGen/X86/buildvec-insertvec.ll
    llvm/test/CodeGen/X86/promote-vec3.ll
    llvm/test/CodeGen/X86/shrink_vmul.ll
    llvm/test/CodeGen/X86/udiv_fix.ll
    llvm/test/CodeGen/X86/udiv_fix_sat.ll
    llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
    llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 00dbc7a35b87..e746e0255d22 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -40275,6 +40275,27 @@ static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
                        Idx);
   }
 
+  // We can only legally extract other elements from 128-bit vectors and in
+  // certain circumstances, depending on SSE-level.
+  // TODO: Investigate using extract_subvector for larger vectors.
+  // TODO: Investigate float/double extraction if it will be just stored.
+  auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
+                                                 unsigned Idx) {
+    if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
+        ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
+      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
+                         DAG.getBitcast(VecVT, Vec),
+                         DAG.getIntPtrConstant(Idx, dl));
+    }
+    if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
+        (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
+      unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
+      return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
+                         DAG.getTargetConstant(Idx, dl, MVT::i8));
+    }
+    return SDValue();
+  };
+
   // Resolve the target shuffle inputs and mask.
   SmallVector<int, 16> Mask;
   SmallVector<SDValue, 2> Ops;
@@ -40296,7 +40317,7 @@ static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
       Mask = std::move(ScaledMask);
     } else if ((Mask.size() % NumSrcElts) == 0) {
       // Simplify Mask based on demanded element.
-      int ExtractIdx = (int)N->getConstantOperandVal(1);
+      int ExtractIdx = (int)IdxC.getZExtValue();
       int Scale = Mask.size() / NumSrcElts;
       int Lo = Scale * ExtractIdx;
       int Hi = Scale * (ExtractIdx + 1);
@@ -40313,44 +40334,38 @@ static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
     }
   }
 
-  // Check if narrowing/widening failed.
-  if (Mask.size() != NumSrcElts)
-    return SDValue();
-
-  int SrcIdx = Mask[IdxC.getZExtValue()];
+  // If narrowing/widening failed, see if we can extract+zero-extend.
+  int ExtractIdx;
+  EVT ExtractVT;
+  if (Mask.size() == NumSrcElts) {
+    ExtractIdx = Mask[IdxC.getZExtValue()];
+    ExtractVT = SrcVT;
+  } else {
+    unsigned Scale = Mask.size() / NumSrcElts;
+    if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
+      return SDValue();
+    unsigned ScaledIdx = Scale * IdxC.getZExtValue();
+    if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
+      return SDValue();
+    ExtractIdx = Mask[ScaledIdx];
+    EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
+    ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
+    assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
+           "Failed to widen vector type");
+  }
 
   // If the shuffle source element is undef/zero then we can just accept it.
-  if (SrcIdx == SM_SentinelUndef)
+  if (ExtractIdx == SM_SentinelUndef)
     return DAG.getUNDEF(VT);
 
-  if (SrcIdx == SM_SentinelZero)
+  if (ExtractIdx == SM_SentinelZero)
     return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
                                 : DAG.getConstant(0, dl, VT);
 
-  SDValue SrcOp = Ops[SrcIdx / Mask.size()];
-  SrcIdx = SrcIdx % Mask.size();
-
-  // We can only extract other elements from 128-bit vectors and in certain
-  // circumstances, depending on SSE-level.
-  // TODO: Investigate using extract_subvector for larger vectors.
-  // TODO: Investigate float/double extraction if it will be just stored.
-  if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
-      ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
-    assert(SrcSVT == VT && "Unexpected extraction type");
-    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
-    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
-                       DAG.getIntPtrConstant(SrcIdx, dl));
-  }
-
-  if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
-      (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
-    assert(VT.getSizeInBits() >= SrcEltBits && "Unexpected extraction type");
-    unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
-    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
-    SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
-                                DAG.getTargetConstant(SrcIdx, dl, MVT::i8));
-    return DAG.getZExtOrTrunc(ExtOp, dl, VT);
-  }
+  SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
+  ExtractIdx = ExtractIdx % Mask.size();
+  if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
+    return DAG.getZExtOrTrunc(V, dl, VT);
 
   return SDValue();
 }

diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index 23fa7e033db9..9866cf5c0e1d 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -1919,123 +1919,102 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
 ; AVX1-NEXT:    pushq %r13
 ; AVX1-NEXT:    pushq %r12
 ; AVX1-NEXT:    pushq %rbx
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm9 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX1-NEXT:    vmovq %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT:    vpextrq $1, %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm5[0],zero,xmm5[1],zero
-; AVX1-NEXT:    vmovq %xmm6, %rcx
-; AVX1-NEXT:    vpextrq $1, %xmm6, %r11
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX1-NEXT:    vmovq %xmm5, %r14
-; AVX1-NEXT:    vpextrq $1, %xmm5, %r15
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX1-NEXT:    vmovq %xmm5, %r10
-; AVX1-NEXT:    vpextrq $1, %xmm5, %r12
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm5[0],zero,xmm5[1],zero
-; AVX1-NEXT:    vmovq %xmm6, %r8
-; AVX1-NEXT:    vpextrq $1, %xmm6, %r9
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX1-NEXT:    vmovq %xmm5, %rsi
-; AVX1-NEXT:    vpextrq $1, %xmm5, %rdi
-; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX1-NEXT:    vpextrq $1, %xmm4, %rax
-; AVX1-NEXT:    vmovq %xmm4, %rbx
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm9[4],xmm3[4],xmm9[5],xmm3[5],xmm9[6],xmm3[6],xmm9[7],xmm3[7]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX1-NEXT:    vpextrq $1, %xmm1, %rdx
-; AVX1-NEXT:    addq %rax, %rdx
-; AVX1-NEXT:    vmovq %xmm1, %rax
-; AVX1-NEXT:    addq %rbx, %rax
-; AVX1-NEXT:    vpextrq $1, %xmm2, %rbx
-; AVX1-NEXT:    leaq -1(%rdi,%rbx), %rdi
-; AVX1-NEXT:    vmovq %xmm2, %rbx
-; AVX1-NEXT:    leaq -1(%rsi,%rbx), %rsi
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rbx
-; AVX1-NEXT:    leaq -1(%r9,%rbx), %r9
-; AVX1-NEXT:    vmovq %xmm0, %rbx
-; AVX1-NEXT:    leaq -1(%r8,%rbx), %r8
-; AVX1-NEXT:    vpextrq $1, %xmm7, %rbx
-; AVX1-NEXT:    leaq -1(%r12,%rbx), %rbx
-; AVX1-NEXT:    vmovq %xmm7, %rbp
-; AVX1-NEXT:    leaq -1(%r10,%rbp), %r10
-; AVX1-NEXT:    vpextrq $1, %xmm6, %rbp
-; AVX1-NEXT:    leaq -1(%r15,%rbp), %r13
-; AVX1-NEXT:    vmovq %xmm6, %rbp
-; AVX1-NEXT:    leaq -1(%r14,%rbp), %r12
-; AVX1-NEXT:    vpextrq $1, %xmm5, %rbp
-; AVX1-NEXT:    leaq -1(%r11,%rbp), %r14
-; AVX1-NEXT:    vmovq %xmm5, %rbp
-; AVX1-NEXT:    leaq -1(%rcx,%rbp), %r15
-; AVX1-NEXT:    vpextrq $1, %xmm4, %rcx
-; AVX1-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX1-NEXT:    leaq -1(%rbp,%rcx), %r11
-; AVX1-NEXT:    vmovq %xmm4, %rcx
-; AVX1-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX1-NEXT:    leaq -1(%rbp,%rcx), %rcx
-; AVX1-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT:    vpextrq $1, %xmm8, %rcx
-; AVX1-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rbp
-; AVX1-NEXT:    leaq -1(%rcx,%rbp), %rcx
-; AVX1-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT:    vmovq %xmm8, %rcx
-; AVX1-NEXT:    vmovq %xmm0, %rbp
-; AVX1-NEXT:    leaq -1(%rcx,%rbp), %rcx
-; AVX1-NEXT:    movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT:    xorl %ebp, %ebp
-; AVX1-NEXT:    addq $-1, %rdx
-; AVX1-NEXT:    movl $0, %ecx
-; AVX1-NEXT:    adcq $-1, %rcx
+; AVX1-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX1-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX1-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX1-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT:    vpextrw $7, %xmm0, %r11d
+; AVX1-NEXT:    vpextrw $0, %xmm3, %r14d
+; AVX1-NEXT:    vpextrw $1, %xmm3, %r15d
+; AVX1-NEXT:    vpextrw $2, %xmm3, %r10d
+; AVX1-NEXT:    vpextrw $3, %xmm3, %r9d
+; AVX1-NEXT:    vpextrw $4, %xmm3, %r8d
+; AVX1-NEXT:    vpextrw $5, %xmm3, %ebx
+; AVX1-NEXT:    vpextrw $6, %xmm3, %ebp
+; AVX1-NEXT:    vpextrw $7, %xmm3, %edi
+; AVX1-NEXT:    vpextrw $1, %xmm0, %edx
+; AVX1-NEXT:    vpextrw $0, %xmm0, %esi
+; AVX1-NEXT:    vpextrw $1, %xmm1, %ecx
+; AVX1-NEXT:    addq %rdx, %rcx
+; AVX1-NEXT:    vpextrw $0, %xmm1, %eax
+; AVX1-NEXT:    addq %rsi, %rax
+; AVX1-NEXT:    vpextrw $7, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%rdi,%rdx), %rdi
+; AVX1-NEXT:    vpextrw $6, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%rbp,%rdx), %rbp
+; AVX1-NEXT:    vpextrw $5, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%rbx,%rdx), %rbx
+; AVX1-NEXT:    vpextrw $4, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%r8,%rdx), %r8
+; AVX1-NEXT:    vpextrw $3, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%r9,%rdx), %r9
+; AVX1-NEXT:    vpextrw $2, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%r10,%rdx), %r10
+; AVX1-NEXT:    vpextrw $1, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%r15,%rdx), %r13
+; AVX1-NEXT:    vpextrw $0, %xmm2, %edx
+; AVX1-NEXT:    leaq -1(%r14,%rdx), %r12
+; AVX1-NEXT:    vpextrw $7, %xmm1, %edx
+; AVX1-NEXT:    leaq -1(%r11,%rdx), %r15
+; AVX1-NEXT:    vpextrw $6, %xmm1, %edx
+; AVX1-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX1-NEXT:    leaq -1(%rsi,%rdx), %r14
+; AVX1-NEXT:    vpextrw $5, %xmm1, %edx
+; AVX1-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX1-NEXT:    leaq -1(%rsi,%rdx), %r11
+; AVX1-NEXT:    vpextrw $4, %xmm1, %edx
+; AVX1-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX1-NEXT:    leaq -1(%rsi,%rdx), %rdx
+; AVX1-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT:    vpextrw $3, %xmm0, %edx
+; AVX1-NEXT:    vpextrw $3, %xmm1, %esi
+; AVX1-NEXT:    leaq -1(%rdx,%rsi), %rdx
+; AVX1-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT:    vpextrw $2, %xmm0, %edx
+; AVX1-NEXT:    vpextrw $2, %xmm1, %esi
+; AVX1-NEXT:    leaq -1(%rdx,%rsi), %rdx
+; AVX1-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX1-NEXT:    xorl %edx, %edx
+; AVX1-NEXT:    addq $-1, %rcx
+; AVX1-NEXT:    movl $0, %esi
+; AVX1-NEXT:    adcq $-1, %rsi
 ; AVX1-NEXT:    addq $-1, %rax
-; AVX1-NEXT:    adcq $-1, %rbp
-; AVX1-NEXT:    shldq $63, %rax, %rbp
-; AVX1-NEXT:    shldq $63, %rdx, %rcx
+; AVX1-NEXT:    adcq $-1, %rdx
+; AVX1-NEXT:    shldq $63, %rax, %rdx
+; AVX1-NEXT:    shldq $63, %rcx, %rsi
 ; AVX1-NEXT:    shrq %rdi
 ; AVX1-NEXT:    vmovq %rdi, %xmm8
-; AVX1-NEXT:    shrq %rsi
-; AVX1-NEXT:    vmovq %rsi, %xmm9
-; AVX1-NEXT:    shrq %r9
-; AVX1-NEXT:    vmovq %r9, %xmm0
+; AVX1-NEXT:    shrq %rbp
+; AVX1-NEXT:    vmovq %rbp, %xmm9
+; AVX1-NEXT:    shrq %rbx
+; AVX1-NEXT:    vmovq %rbx, %xmm0
 ; AVX1-NEXT:    shrq %r8
 ; AVX1-NEXT:    vmovq %r8, %xmm1
-; AVX1-NEXT:    vmovq %rcx, %xmm12
-; AVX1-NEXT:    vmovq %rbp, %xmm13
-; AVX1-NEXT:    shrq %rbx
-; AVX1-NEXT:    vmovq %rbx, %xmm14
+; AVX1-NEXT:    shrq %r9
+; AVX1-NEXT:    vmovq %r9, %xmm12
 ; AVX1-NEXT:    shrq %r10
-; AVX1-NEXT:    vmovq %r10, %xmm15
+; AVX1-NEXT:    vmovq %r10, %xmm13
 ; AVX1-NEXT:    shrq %r13
-; AVX1-NEXT:    vmovq %r13, %xmm10
+; AVX1-NEXT:    vmovq %r13, %xmm14
 ; AVX1-NEXT:    shrq %r12
-; AVX1-NEXT:    vmovq %r12, %xmm11
-; AVX1-NEXT:    shrq %r14
-; AVX1-NEXT:    vmovq %r14, %xmm2
+; AVX1-NEXT:    vmovq %r12, %xmm15
 ; AVX1-NEXT:    shrq %r15
-; AVX1-NEXT:    vmovq %r15, %xmm3
+; AVX1-NEXT:    vmovq %r15, %xmm10
+; AVX1-NEXT:    shrq %r14
+; AVX1-NEXT:    vmovq %r14, %xmm11
 ; AVX1-NEXT:    shrq %r11
-; AVX1-NEXT:    vmovq %r11, %xmm4
+; AVX1-NEXT:    vmovq %r11, %xmm2
 ; AVX1-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
 ; AVX1-NEXT:    shrq %rax
-; AVX1-NEXT:    vmovq %rax, %xmm5
+; AVX1-NEXT:    vmovq %rax, %xmm3
+; AVX1-NEXT:    vmovq %rsi, %xmm4
+; AVX1-NEXT:    vmovq %rdx, %xmm5
 ; AVX1-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
 ; AVX1-NEXT:    shrq %rax
 ; AVX1-NEXT:    vmovq %rax, %xmm6
@@ -2044,26 +2023,26 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
 ; AVX1-NEXT:    vmovq %rax, %xmm7
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT:    vpsllq $48, %xmm8, %xmm8
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm9[0,0,1,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0,1,2],xmm8[3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm9[0,0,0,0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0,1,2,3,4,5,6],xmm8[7]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; AVX1-NEXT:    vpslld $16, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX1-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm9[0,1,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm8[6,7]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
+; AVX1-NEXT:    vpsllq $48, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX1-NEXT:    vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,1]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpslld $16, %xmm3, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    vmovdqu %xmm0, (%rax)
 ; AVX1-NEXT:    popq %rbx
 ; AVX1-NEXT:    popq %r12

diff --git a/llvm/test/CodeGen/X86/buildvec-insertvec.ll b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
index abe1fa0d3b5d..e6b1595a8be2 100644
--- a/llvm/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
@@ -802,9 +802,9 @@ define i32 @PR46586(i8* %p, <4 x i32> %v) {
 ;
 ; SSE41-LABEL: PR46586:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE41-NEXT:    extractps $3, %xmm0, %ecx
-; SSE41-NEXT:    pextrd $3, %xmm1, %eax
+; SSE41-NEXT:    pextrb $3, %xmm1, %eax
 ; SSE41-NEXT:    xorl %edx, %edx
 ; SSE41-NEXT:    divl %ecx
 ; SSE41-NEXT:    movl %edx, %eax
@@ -812,9 +812,9 @@ define i32 @PR46586(i8* %p, <4 x i32> %v) {
 ;
 ; AVX-LABEL: PR46586:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX-NEXT:    vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vextractps $3, %xmm0, %ecx
-; AVX-NEXT:    vpextrd $3, %xmm1, %eax
+; AVX-NEXT:    vpextrb $3, %xmm1, %eax
 ; AVX-NEXT:    xorl %edx, %edx
 ; AVX-NEXT:    divl %ecx
 ; AVX-NEXT:    movl %edx, %eax

diff --git a/llvm/test/CodeGen/X86/promote-vec3.ll b/llvm/test/CodeGen/X86/promote-vec3.ll
index 19eda22b5b76..984db8b1f665 100644
--- a/llvm/test/CodeGen/X86/promote-vec3.ll
+++ b/llvm/test/CodeGen/X86/promote-vec3.ll
@@ -18,13 +18,11 @@ define <3 x i16> @zext_i8(<3 x i8>) {
 ;
 ; SSE41-LABEL: zext_i8:
 ; SSE41:       # %bb.0:
+; SSE41-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; SSE41-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
 ; SSE41-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE41-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
-; SSE41-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm0
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE41-NEXT:    movd %xmm0, %eax
-; SSE41-NEXT:    pextrw $1, %xmm0, %edx
-; SSE41-NEXT:    pextrw $2, %xmm0, %ecx
 ; SSE41-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE41-NEXT:    # kill: def $dx killed $dx killed $edx
 ; SSE41-NEXT:    # kill: def $cx killed $cx killed $ecx
@@ -32,13 +30,11 @@ define <3 x i16> @zext_i8(<3 x i8>) {
 ;
 ; AVX-32-LABEL: zext_i8:
 ; AVX-32:       # %bb.0:
+; AVX-32-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; AVX-32-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
 ; AVX-32-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-32-NEXT:    vpinsrb $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
-; AVX-32-NEXT:    vpinsrb $2, {{[0-9]+}}(%esp), %xmm0, %xmm0
 ; AVX-32-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX-32-NEXT:    vmovd %xmm0, %eax
-; AVX-32-NEXT:    vpextrw $1, %xmm0, %edx
-; AVX-32-NEXT:    vpextrw $2, %xmm0, %ecx
 ; AVX-32-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-32-NEXT:    # kill: def $dx killed $dx killed $edx
 ; AVX-32-NEXT:    # kill: def $cx killed $cx killed $ecx
@@ -46,15 +42,13 @@ define <3 x i16> @zext_i8(<3 x i8>) {
 ;
 ; AVX-64-LABEL: zext_i8:
 ; AVX-64:       # %bb.0:
+; AVX-64-NEXT:    movzbl %sil, %esi
 ; AVX-64-NEXT:    vmovd %edi, %xmm0
-; AVX-64-NEXT:    vpinsrb $1, %esi, %xmm0, %xmm0
-; AVX-64-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
 ; AVX-64-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-64-NEXT:    movzbl %dl, %ecx
 ; AVX-64-NEXT:    vmovd %xmm0, %eax
-; AVX-64-NEXT:    vpextrw $1, %xmm0, %edx
-; AVX-64-NEXT:    vpextrw $2, %xmm0, %ecx
 ; AVX-64-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX-64-NEXT:    # kill: def $dx killed $dx killed $edx
+; AVX-64-NEXT:    movl %esi, %edx
 ; AVX-64-NEXT:    # kill: def $cx killed $cx killed $ecx
 ; AVX-64-NEXT:    retq
   %2 = zext <3 x i8> %0 to <3 x i16>

diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index e09b4ce81e7c..e6660f9e4957 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -2073,100 +2073,108 @@ entry:
 define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind {
 ; X86-SSE-LABEL: PR34947:
 ; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebp
+; X86-SSE-NEXT:    pushl %ebx
+; X86-SSE-NEXT:    pushl %edi
 ; X86-SSE-NEXT:    pushl %esi
+; X86-SSE-NEXT:    pushl %eax
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    movdqa (%eax), %xmm4
-; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT:    movdqa (%ecx), %xmm1
-; X86-SSE-NEXT:    movdqa 16(%ecx), %xmm5
-; X86-SSE-NEXT:    pxor %xmm3, %xmm3
-; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; X86-SSE-NEXT:    movdqa %xmm4, %xmm2
-; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; X86-SSE-NEXT:    movdqa %xmm4, %xmm6
-; X86-SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; X86-SSE-NEXT:    movdqa %xmm4, %xmm3
-; X86-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-SSE-NEXT:    movd %xmm3, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[3,3,3,3]
-; X86-SSE-NEXT:    movd %xmm3, %esi
+; X86-SSE-NEXT:    movzwl 16(%eax), %edx
+; X86-SSE-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X86-SSE-NEXT:    movdqa (%eax), %xmm3
+; X86-SSE-NEXT:    movdqa (%ecx), %xmm0
+; X86-SSE-NEXT:    movdqa 16(%ecx), %xmm1
+; X86-SSE-NEXT:    pxor %xmm4, %xmm4
+; X86-SSE-NEXT:    movdqa %xmm3, %xmm2
+; X86-SSE-NEXT:    pextrw $7, %xmm3, %eax
+; X86-SSE-NEXT:    pextrw $4, %xmm3, %esi
+; X86-SSE-NEXT:    pextrw $0, %xmm3, %edi
+; X86-SSE-NEXT:    pextrw $1, %xmm3, %ebx
+; X86-SSE-NEXT:    pextrw $3, %xmm3, %ebp
+; X86-SSE-NEXT:    movdqa %xmm3, %xmm5
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; X86-SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
+; X86-SSE-NEXT:    movd %xmm3, %ecx
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
+; X86-SSE-NEXT:    divl %ecx
 ; X86-SSE-NEXT:    movd %edx, %xmm3
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
-; X86-SSE-NEXT:    movd %xmm7, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
-; X86-SSE-NEXT:    movd %xmm7, %esi
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; X86-SSE-NEXT:    movd %xmm4, %eax
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; X86-SSE-NEXT:    movd %xmm4, %ecx
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm7
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; X86-SSE-NEXT:    movd %xmm6, %eax
-; X86-SSE-NEXT:    movd %xmm5, %esi
+; X86-SSE-NEXT:    divl %ecx
+; X86-SSE-NEXT:    movd %edx, %xmm4
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; X86-SSE-NEXT:    movd %xmm1, %ecx
+; X86-SSE-NEXT:    movl %esi, %eax
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
+; X86-SSE-NEXT:    divl %ecx
 ; X86-SSE-NEXT:    movd %edx, %xmm3
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,1,1]
-; X86-SSE-NEXT:    movd %xmm6, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
-; X86-SSE-NEXT:    movd %xmm5, %esi
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
+; X86-SSE-NEXT:    movd %xmm2, %eax
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE-NEXT:    movd %xmm1, %ecx
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm5
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; X86-SSE-NEXT:    movdqa %xmm4, %xmm5
-; X86-SSE-NEXT:    psrld $16, %xmm5
-; X86-SSE-NEXT:    movd %xmm5, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X86-SSE-NEXT:    movd %xmm5, %esi
+; X86-SSE-NEXT:    divl %ecx
+; X86-SSE-NEXT:    movd %edx, %xmm1
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; X86-SSE-NEXT:    movd %xmm0, %ecx
+; X86-SSE-NEXT:    movl %edi, %eax
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm6
-; X86-SSE-NEXT:    movd %xmm2, %eax
-; X86-SSE-NEXT:    movd %xmm1, %esi
+; X86-SSE-NEXT:    divl %ecx
+; X86-SSE-NEXT:    movd %edx, %xmm1
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    movd %xmm2, %ecx
+; X86-SSE-NEXT:    movl %ebx, %eax
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm5
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
-; X86-SSE-NEXT:    psrlq $48, %xmm4
-; X86-SSE-NEXT:    movd %xmm4, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[3,3,3,3]
-; X86-SSE-NEXT:    movd %xmm4, %esi
+; X86-SSE-NEXT:    divl %ecx
+; X86-SSE-NEXT:    movd %edx, %xmm2
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    movd %xmm2, %ecx
+; X86-SSE-NEXT:    movl %ebp, %eax
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm4
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; X86-SSE-NEXT:    movd %xmm2, %eax
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; X86-SSE-NEXT:    movd %xmm1, %esi
+; X86-SSE-NEXT:    divl %ecx
+; X86-SSE-NEXT:    movd %edx, %xmm2
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[2,3,2,3]
+; X86-SSE-NEXT:    movd %xmm4, %eax
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; X86-SSE-NEXT:    movd %xmm0, %ecx
 ; X86-SSE-NEXT:    xorl %edx, %edx
-; X86-SSE-NEXT:    divl %esi
-; X86-SSE-NEXT:    movd %edx, %xmm1
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm1[0]
-; X86-SSE-NEXT:    movd %xmm0, %eax
+; X86-SSE-NEXT:    divl %ecx
+; X86-SSE-NEXT:    movd %edx, %xmm0
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X86-SSE-NEXT:    movl (%esp), %eax # 4-byte Reload
 ; X86-SSE-NEXT:    xorl %edx, %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE-NEXT:    divl 32(%ecx)
 ; X86-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199]
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
-; X86-SSE-NEXT:    pmuludq %xmm0, %xmm5
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[0,2,2,3]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
 ; X86-SSE-NEXT:    pmuludq %xmm0, %xmm1
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; X86-SSE-NEXT:    pmuludq %xmm0, %xmm2
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
 ; X86-SSE-NEXT:    pmuludq %xmm0, %xmm3
 ; X86-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; X86-SSE-NEXT:    pmuludq %xmm0, %xmm1
-; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X86-SSE-NEXT:    pmuludq %xmm0, %xmm2
+; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
 ; X86-SSE-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
 ; X86-SSE-NEXT:    movl %eax, (%eax)
 ; X86-SSE-NEXT:    movdqa %xmm3, (%eax)
-; X86-SSE-NEXT:    movdqa %xmm2, (%eax)
+; X86-SSE-NEXT:    movdqa %xmm1, (%eax)
+; X86-SSE-NEXT:    addl $4, %esp
 ; X86-SSE-NEXT:    popl %esi
+; X86-SSE-NEXT:    popl %edi
+; X86-SSE-NEXT:    popl %ebx
+; X86-SSE-NEXT:    popl %ebp
 ; X86-SSE-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: PR34947:
@@ -2318,96 +2326,94 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind {
 ;
 ; X64-SSE-LABEL: PR34947:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movdqa (%rdi), %xmm4
-; X64-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-SSE-NEXT:    movdqa (%rsi), %xmm1
-; X64-SSE-NEXT:    movdqa 16(%rsi), %xmm5
-; X64-SSE-NEXT:    pxor %xmm3, %xmm3
-; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; X64-SSE-NEXT:    movdqa %xmm4, %xmm2
-; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; X64-SSE-NEXT:    movdqa %xmm4, %xmm6
-; X64-SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; X64-SSE-NEXT:    movdqa %xmm4, %xmm3
-; X64-SSE-NEXT:    psrldq {{.*#+}} xmm3 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-SSE-NEXT:    movd %xmm3, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[3,3,3,3]
-; X64-SSE-NEXT:    movd %xmm3, %ecx
+; X64-SSE-NEXT:    movzwl 16(%rdi), %r8d
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm3
+; X64-SSE-NEXT:    movdqa (%rsi), %xmm0
+; X64-SSE-NEXT:    movdqa 16(%rsi), %xmm1
+; X64-SSE-NEXT:    pxor %xmm4, %xmm4
+; X64-SSE-NEXT:    movdqa %xmm3, %xmm2
+; X64-SSE-NEXT:    pextrw $7, %xmm3, %eax
+; X64-SSE-NEXT:    pextrw $4, %xmm3, %r9d
+; X64-SSE-NEXT:    pextrw $0, %xmm3, %r10d
+; X64-SSE-NEXT:    pextrw $1, %xmm3, %r11d
+; X64-SSE-NEXT:    pextrw $3, %xmm3, %ecx
+; X64-SSE-NEXT:    movdqa %xmm3, %xmm5
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; X64-SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
+; X64-SSE-NEXT:    movd %xmm3, %edi
 ; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    divl %ecx
+; X64-SSE-NEXT:    divl %edi
 ; X64-SSE-NEXT:    movd %edx, %xmm3
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
-; X64-SSE-NEXT:    movd %xmm7, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
-; X64-SSE-NEXT:    movd %xmm7, %ecx
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; X64-SSE-NEXT:    movd %xmm4, %edi
 ; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm7
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
-; X64-SSE-NEXT:    movd %xmm6, %eax
-; X64-SSE-NEXT:    movd %xmm5, %ecx
+; X64-SSE-NEXT:    divl %edi
+; X64-SSE-NEXT:    movd %edx, %xmm4
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; X64-SSE-NEXT:    movd %xmm1, %edi
+; X64-SSE-NEXT:    movl %r9d, %eax
 ; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    divl %ecx
+; X64-SSE-NEXT:    divl %edi
 ; X64-SSE-NEXT:    movd %edx, %xmm3
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,1,1]
-; X64-SSE-NEXT:    movd %xmm6, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
-; X64-SSE-NEXT:    movd %xmm5, %ecx
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
+; X64-SSE-NEXT:    movd %xmm2, %eax
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X64-SSE-NEXT:    movd %xmm1, %edi
 ; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm5
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; X64-SSE-NEXT:    movdqa %xmm4, %xmm5
-; X64-SSE-NEXT:    psrld $16, %xmm5
-; X64-SSE-NEXT:    movd %xmm5, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; X64-SSE-NEXT:    movd %xmm5, %ecx
+; X64-SSE-NEXT:    divl %edi
+; X64-SSE-NEXT:    movd %edx, %xmm1
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; X64-SSE-NEXT:    movd %xmm0, %edi
+; X64-SSE-NEXT:    movl %r10d, %eax
 ; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm6
-; X64-SSE-NEXT:    movd %xmm2, %eax
-; X64-SSE-NEXT:    movd %xmm1, %ecx
+; X64-SSE-NEXT:    divl %edi
+; X64-SSE-NEXT:    movd %edx, %xmm1
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    movd %xmm2, %edi
+; X64-SSE-NEXT:    movl %r11d, %eax
 ; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm5
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
-; X64-SSE-NEXT:    psrlq $48, %xmm4
-; X64-SSE-NEXT:    movd %xmm4, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[3,3,3,3]
-; X64-SSE-NEXT:    movd %xmm4, %ecx
+; X64-SSE-NEXT:    divl %edi
+; X64-SSE-NEXT:    movd %edx, %xmm2
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    movd %xmm2, %edi
+; X64-SSE-NEXT:    movl %ecx, %eax
 ; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm4
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; X64-SSE-NEXT:    movd %xmm2, %eax
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; X64-SSE-NEXT:    movd %xmm1, %ecx
+; X64-SSE-NEXT:    divl %edi
+; X64-SSE-NEXT:    movd %edx, %xmm2
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[2,3,2,3]
+; X64-SSE-NEXT:    movd %xmm4, %eax
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; X64-SSE-NEXT:    movd %xmm0, %ecx
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl %ecx
-; X64-SSE-NEXT:    movd %edx, %xmm1
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm1[0]
-; X64-SSE-NEXT:    movd %xmm0, %eax
+; X64-SSE-NEXT:    movd %edx, %xmm0
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; X64-SSE-NEXT:    movl %r8d, %eax
 ; X64-SSE-NEXT:    xorl %edx, %edx
 ; X64-SSE-NEXT:    divl 32(%rsi)
 ; X64-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199]
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
-; X64-SSE-NEXT:    pmuludq %xmm0, %xmm5
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[0,2,2,3]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
 ; X64-SSE-NEXT:    pmuludq %xmm0, %xmm1
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; X64-SSE-NEXT:    pmuludq %xmm0, %xmm2
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
 ; X64-SSE-NEXT:    pmuludq %xmm0, %xmm3
 ; X64-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; X64-SSE-NEXT:    pmuludq %xmm0, %xmm1
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; X64-SSE-NEXT:    pmuludq %xmm0, %xmm2
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X64-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
 ; X64-SSE-NEXT:    imull $8199, %edx, %eax # imm = 0x2007
 ; X64-SSE-NEXT:    movl %eax, (%rax)
 ; X64-SSE-NEXT:    movdqa %xmm3, (%rax)
-; X64-SSE-NEXT:    movdqa %xmm2, (%rax)
+; X64-SSE-NEXT:    movdqa %xmm1, (%rax)
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: PR34947:

diff --git a/llvm/test/CodeGen/X86/udiv_fix.ll b/llvm/test/CodeGen/X86/udiv_fix.ll
index 1eef8c78cc5c..a6b7e5b70d8a 100644
--- a/llvm/test/CodeGen/X86/udiv_fix.ll
+++ b/llvm/test/CodeGen/X86/udiv_fix.ll
@@ -258,10 +258,9 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    movq %rax, %xmm4
 ; X64-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
 ; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X64-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
-; X64-NEXT:    movq %xmm2, %rcx
 ; X64-NEXT:    psllq $31, %xmm0
 ; X64-NEXT:    movq %xmm0, %rax
+; X64-NEXT:    movd %xmm1, %ecx
 ; X64-NEXT:    xorl %edx, %edx
 ; X64-NEXT:    divq %rcx
 ; X64-NEXT:    movq %rax, %xmm2

diff --git a/llvm/test/CodeGen/X86/udiv_fix_sat.ll b/llvm/test/CodeGen/X86/udiv_fix_sat.ll
index 0f295a0f7c2a..67b0ea303848 100644
--- a/llvm/test/CodeGen/X86/udiv_fix_sat.ll
+++ b/llvm/test/CodeGen/X86/udiv_fix_sat.ll
@@ -317,12 +317,12 @@ define i16 @func7(i16 %x, i16 %y) nounwind {
 define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-LABEL: vec:
 ; X64:       # %bb.0:
-; X64-NEXT:    pxor %xmm3, %xmm3
+; X64-NEXT:    pxor %xmm8, %xmm8
 ; X64-NEXT:    movdqa %xmm1, %xmm2
-; X64-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; X64-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
 ; X64-NEXT:    movq %xmm2, %rcx
 ; X64-NEXT:    movdqa %xmm0, %xmm2
-; X64-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; X64-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
 ; X64-NEXT:    paddq %xmm2, %xmm2
 ; X64-NEXT:    psllq $31, %xmm2
 ; X64-NEXT:    movq %xmm2, %rax
@@ -338,16 +338,16 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    divq %rcx
 ; X64-NEXT:    movq %rax, %xmm2
 ; X64-NEXT:    punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm2[0]
-; X64-NEXT:    movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
+; X64-NEXT:    movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
 ; X64-NEXT:    movdqa %xmm7, %xmm2
-; X64-NEXT:    pxor %xmm4, %xmm2
-; X64-NEXT:    movdqa {{.*#+}} xmm8 = [9223372043297226751,9223372043297226751]
-; X64-NEXT:    movdqa %xmm8, %xmm6
+; X64-NEXT:    pxor %xmm3, %xmm2
+; X64-NEXT:    movdqa {{.*#+}} xmm9 = [9223372043297226751,9223372043297226751]
+; X64-NEXT:    movdqa %xmm9, %xmm6
 ; X64-NEXT:    pcmpgtd %xmm2, %xmm6
-; X64-NEXT:    pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
-; X64-NEXT:    pcmpeqd %xmm8, %xmm2
+; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
+; X64-NEXT:    pcmpeqd %xmm9, %xmm2
 ; X64-NEXT:    pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
-; X64-NEXT:    pand %xmm9, %xmm5
+; X64-NEXT:    pand %xmm4, %xmm5
 ; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
 ; X64-NEXT:    por %xmm5, %xmm2
 ; X64-NEXT:    movdqa {{.*#+}} xmm6 = [8589934591,8589934591]
@@ -355,15 +355,14 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    pandn %xmm6, %xmm2
 ; X64-NEXT:    por %xmm7, %xmm2
 ; X64-NEXT:    psrlq $1, %xmm2
-; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; X64-NEXT:    movss {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3]
-; X64-NEXT:    movq %xmm3, %rcx
+; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
 ; X64-NEXT:    paddq %xmm0, %xmm0
 ; X64-NEXT:    psllq $31, %xmm0
 ; X64-NEXT:    movq %xmm0, %rax
+; X64-NEXT:    movd %xmm1, %ecx
 ; X64-NEXT:    xorl %edx, %edx
 ; X64-NEXT:    divq %rcx
-; X64-NEXT:    movq %rax, %xmm3
+; X64-NEXT:    movq %rax, %xmm4
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; X64-NEXT:    movq %xmm0, %rax
 ; X64-NEXT:    psrlq $32, %xmm1
@@ -371,19 +370,19 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; X64-NEXT:    xorl %edx, %edx
 ; X64-NEXT:    divq %rcx
 ; X64-NEXT:    movq %rax, %xmm0
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; X64-NEXT:    pxor %xmm3, %xmm4
-; X64-NEXT:    movdqa %xmm8, %xmm0
-; X64-NEXT:    pcmpgtd %xmm4, %xmm0
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
+; X64-NEXT:    pxor %xmm4, %xmm3
+; X64-NEXT:    movdqa %xmm9, %xmm0
+; X64-NEXT:    pcmpgtd %xmm3, %xmm0
 ; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; X64-NEXT:    pcmpeqd %xmm8, %xmm4
-; X64-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; X64-NEXT:    pand %xmm1, %xmm4
+; X64-NEXT:    pcmpeqd %xmm9, %xmm3
+; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; X64-NEXT:    pand %xmm1, %xmm3
 ; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    por %xmm4, %xmm0
-; X64-NEXT:    pand %xmm0, %xmm3
-; X64-NEXT:    pandn %xmm6, %xmm0
 ; X64-NEXT:    por %xmm3, %xmm0
+; X64-NEXT:    pand %xmm0, %xmm4
+; X64-NEXT:    pandn %xmm6, %xmm0
+; X64-NEXT:    por %xmm4, %xmm0
 ; X64-NEXT:    psrlq $1, %xmm0
 ; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; X64-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
index f0cb46e63d8f..daf9d6a9e020 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -815,16 +815,14 @@ define <4 x double> @uitofp_v4i64_v4f64(<4 x i64> %x) #0 {
 ; AVX1-64-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm3[0]
 ; AVX1-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-64-NEXT:    vpsrlq $32, %xmm2, %xmm2
-; AVX1-64-NEXT:    vpextrq $1, %xmm2, %rax
+; AVX1-64-NEXT:    vextractps $3, %xmm2, %eax
 ; AVX1-64-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
-; AVX1-64-NEXT:    vmovq %xmm2, %rax
+; AVX1-64-NEXT:    vextractps $1, %xmm2, %eax
 ; AVX1-64-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; AVX1-64-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX1-64-NEXT:    vpsrlq $32, %xmm0, %xmm0
-; AVX1-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-64-NEXT:    vpextrd $3, %xmm0, %eax
 ; AVX1-64-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
-; AVX1-64-NEXT:    vmovq %xmm0, %rax
+; AVX1-64-NEXT:    vpextrd $1, %xmm0, %eax
 ; AVX1-64-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm0
 ; AVX1-64-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; AVX1-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -834,19 +832,17 @@ define <4 x double> @uitofp_v4i64_v4f64(<4 x i64> %x) #0 {
 ;
 ; AVX2-64-LABEL: uitofp_v4i64_v4f64:
 ; AVX2-64:       # %bb.0:
-; AVX2-64-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-64-NEXT:    vpsrlq $32, %xmm1, %xmm1
-; AVX2-64-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX2-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-64-NEXT:    vextractps $3, %xmm1, %eax
 ; AVX2-64-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
-; AVX2-64-NEXT:    vmovq %xmm1, %rax
+; AVX2-64-NEXT:    vextractps $1, %xmm1, %eax
 ; AVX2-64-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
 ; AVX2-64-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2-64-NEXT:    vpsrlq $32, %xmm0, %xmm2
-; AVX2-64-NEXT:    vpextrq $1, %xmm2, %rax
+; AVX2-64-NEXT:    vextractps $3, %xmm0, %eax
+; AVX2-64-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
+; AVX2-64-NEXT:    vextractps $1, %xmm0, %eax
 ; AVX2-64-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm3
-; AVX2-64-NEXT:    vmovq %xmm2, %rax
-; AVX2-64-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
-; AVX2-64-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX2-64-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX2-64-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX2-64-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [4.294967296E+9,4.294967296E+9,4.294967296E+9,4.294967296E+9]
 ; AVX2-64-NEXT:    vmulpd %ymm2, %ymm1, %ymm1

diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index bb42a4c6102d..0b5d5913f38c 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -7556,16 +7556,14 @@ define <4 x double> @constrained_vector_uitofp_v4f64_v4i64(<4 x i64> %x) #0 {
 ; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm3[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vpsrlq $32, %xmm2, %xmm2
-; AVX1-NEXT:    vpextrq $1, %xmm2, %rax
+; AVX1-NEXT:    vextractps $3, %xmm2, %eax
 ; AVX1-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
-; AVX1-NEXT:    vmovq %xmm2, %rax
+; AVX1-NEXT:    vextractps $1, %xmm2, %eax
 ; AVX1-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm2
 ; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX1-NEXT:    vpsrlq $32, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
 ; AVX1-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm3
-; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
 ; AVX1-NEXT:    vcvtsi2sd %rax, %xmm4, %xmm0
 ; AVX1-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
