[llvm] r362797 - [x86] narrow extract subvector of vector select

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 7 06:17:46 PDT 2019


Author: spatel
Date: Fri Jun  7 06:17:46 2019
New Revision: 362797

URL: http://llvm.org/viewvc/llvm-project?rev=362797&view=rev
Log:
[x86] narrow extract subvector of vector select

This is a potentially large perf win for AVX1 targets because we auto-vectorize
to 256-bit vectors but then expect the backend to legalize/optimize for the
half-implemented AVX1 ISA (256-bit FP ops exist, but there are almost no
256-bit integer ops).

On the motivating example from PR37428
(https://bugs.llvm.org/show_bug.cgi?id=37428), there's a 16% speedup when
compiling with "-mavx" (perf tested on Haswell) because we eliminate the
remaining 256-bit vblendv ops, even though this patch doesn't solve that
report's vector shift issue.

I added comments on a couple of tests that require further work. If we have
256-bit logic ops separating the vselect and extract, we should probably narrow
everything to 128-bit, but that requires a larger pattern match.
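
As a rough illustration of the pattern being narrowed (a hypothetical example,
not one of the tests in this commit), consider extracting the low 128-bit half
of a 256-bit select on an AVX1 target:

  ; The <4 x i64> compare is not legal for AVX1, so it is split into two
  ; 128-bit pcmpgtq ops whose results are concatenated to form the select
  ; condition. That concatenated condition is what lets this combine narrow
  ; the select to just the extracted 128-bit half instead of emitting a
  ; 256-bit vblendvpd.
  define <2 x i64> @extract_lo_of_max(<4 x i64> %x, <4 x i64> %y) {
    %cmp = icmp sgt <4 x i64> %x, %y
    %max = select <4 x i1> %cmp, <4 x i64> %x, <4 x i64> %y
    %lo = shufflevector <4 x i64> %max, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
    ret <2 x i64> %lo
  }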

Differential Revision: https://reviews.llvm.org/D62969

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll
    llvm/trunk/test/CodeGen/X86/masked_store_trunc_ssat.ll
    llvm/trunk/test/CodeGen/X86/masked_store_trunc_usat.ll
    llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll
    llvm/trunk/test/CodeGen/X86/psubus.ll
    llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll
    llvm/trunk/test/CodeGen/X86/usub_sat_vec.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-smax.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-smin.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-umax.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll
    llvm/trunk/test/CodeGen/X86/vector-reduce-umin.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Jun  7 06:17:46 2019
@@ -43158,6 +43158,55 @@ static SDValue combineInsertSubvector(SD
   return SDValue();
 }
 
+/// If we are extracting a subvector of a vector select and the select condition
+/// is composed of concatenated vectors, try to narrow the select width. This
+/// is a common pattern for AVX1 integer code because 256-bit selects may be
+/// legal, but there is almost no integer math/logic available for 256-bit.
+/// This function should only be called with legal types (otherwise, the calls
+/// to get simple value types will assert).
+static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
+  SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
+  SmallVector<SDValue, 4> CatOps;
+  if (Sel.getOpcode() != ISD::VSELECT ||
+      !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
+    return SDValue();
+
+  // TODO: This can be extended to handle extraction to 256-bits.
+  MVT VT = Ext->getSimpleValueType(0);
+  if (!VT.is128BitVector())
+    return SDValue();
+
+  MVT WideVT = Ext->getOperand(0).getSimpleValueType();
+  MVT SelVT = Sel.getSimpleValueType();
+  unsigned SelElts = SelVT.getVectorNumElements();
+  unsigned CastedElts = WideVT.getVectorNumElements();
+  unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
+  if (SelElts % CastedElts == 0) {
+    // The select has the same or more (narrower) elements than the extract
+    // operand. The extraction index gets scaled by that factor.
+    ExtIdx *= (SelElts / CastedElts);
+  } else if (CastedElts % SelElts == 0) {
+    // The select has less (wider) elements than the extract operand. Make sure
+    // that the extraction index can be divided evenly.
+    unsigned IndexDivisor = CastedElts / SelElts;
+    if (ExtIdx % IndexDivisor != 0)
+      return SDValue();
+    ExtIdx /= IndexDivisor;
+  } else {
+    llvm_unreachable("Element count of simple vector types are not divisible?");
+  }
+
+  unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
+  unsigned NarrowElts = SelElts / NarrowingFactor;
+  MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
+  SDLoc DL(Ext);
+  SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
+  SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
+  SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
+  SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
+  return DAG.getBitcast(VT, NarrowSel);
+}
+
 static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {
@@ -43200,6 +43249,9 @@ static SDValue combineExtractSubvector(S
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
+  if (SDValue V = narrowExtractedVectorSelect(N, DAG))
+    return V;
+
   SDValue InVec = N->getOperand(0);
   unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
 

Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll Fri Jun  7 06:17:46 2019
@@ -455,13 +455,11 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX1-LABEL: test_reduce_v4i64:
 ; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -531,13 +529,11 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX1-LABEL: test_reduce_v4i64:
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1118,20 +1114,17 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i64:
 ; X86-AVX1:       ## %bb.0:
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; X86-AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -1239,20 +1232,17 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i64:
 ; X64-AVX1:       ## %bb.0:
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; X64-AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll Fri Jun  7 06:17:46 2019
@@ -458,13 +458,11 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX1-LABEL: test_reduce_v4i64:
 ; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -535,13 +533,11 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX1-LABEL: test_reduce_v4i64:
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1124,18 +1120,15 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX1:       ## %bb.0:
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm5
+; X86-AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm0
+; X86-AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm3, %xmm1
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -1245,18 +1238,15 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm5
+; X64-AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm0
+; X64-AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm3, %xmm1
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll Fri Jun  7 06:17:46 2019
@@ -541,15 +541,13 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X86-AVX1-NEXT:    ## xmm2 = mem[0,0]
 ; X86-AVX1-NEXT:    vxorps %xmm2, %xmm1, %xmm3
 ; X86-AVX1-NEXT:    vxorps %xmm2, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; X86-AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; X86-AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
 ; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -633,15 +631,13 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; X64-AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; X64-AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
 ; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1252,30 +1248,27 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i64:
 ; X86-AVX1:       ## %bb.0:
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; X86-AVX1-NEXT:    vmovddup {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0]
-; X86-AVX1-NEXT:    ## xmm3 = mem[0,0]
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm4, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm1, %xmm4
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm0, %xmm5
-; X86-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm1, %xmm2
-; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vmovddup {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT:    ## xmm2 = mem[0,0]
+; X86-AVX1-NEXT:    vxorps %xmm2, %xmm1, %xmm3
+; X86-AVX1-NEXT:    vxorps %xmm2, %xmm0, %xmm4
+; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; X86-AVX1-NEXT:    vxorps %xmm2, %xmm4, %xmm5
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; X86-AVX1-NEXT:    vxorps %xmm2, %xmm6, %xmm7
+; X86-AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; X86-AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm4, %xmm4
+; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm4, %xmm5
+; X86-AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
+; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm4, %xmm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
-; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
+; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
+; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -1401,29 +1394,26 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ;
 ; X64-AVX1-LABEL: test_reduce_v8i64:
 ; X64-AVX1:       ## %bb.0:
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm5
-; X64-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm2
-; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; X64-AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; X64-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
+; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; X64-AVX1-NEXT:    vpxor %xmm2, %xmm4, %xmm5
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; X64-AVX1-NEXT:    vpxor %xmm2, %xmm6, %xmm7
+; X64-AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; X64-AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm4, %xmm4
+; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm4, %xmm5
+; X64-AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
+; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm4, %xmm0
 ; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
-; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
+; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
+; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll Fri Jun  7 06:17:46 2019
@@ -474,20 +474,18 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ;
 ; X86-AVX1-LABEL: test_reduce_v4i64:
 ; X86-AVX1:       ## %bb.0:
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X86-AVX1-NEXT:    vmovddup {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
-; X86-AVX1-NEXT:    ## xmm2 = mem[0,0]
-; X86-AVX1-NEXT:    vxorps %xmm2, %xmm0, %xmm3
-; X86-AVX1-NEXT:    vxorps %xmm2, %xmm1, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; X86-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; X86-AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
-; X86-AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT:    ## xmm1 = mem[0,0]
+; X86-AVX1-NEXT:    vxorps %xmm1, %xmm0, %xmm2
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; X86-AVX1-NEXT:    vxorps %xmm1, %xmm3, %xmm4
+; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
+; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X86-AVX1-NEXT:    vxorpd %xmm1, %xmm0, %xmm3
+; X86-AVX1-NEXT:    vxorpd %xmm1, %xmm2, %xmm1
+; X86-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -569,19 +567,17 @@ define i64 @test_reduce_v4i64(<4 x i64>
 ;
 ; X64-AVX1-LABEL: test_reduce_v4i64:
 ; X64-AVX1:       ## %bb.0:
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; X64-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
-; X64-AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; X64-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; X64-AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
-; X64-AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; X64-AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm2
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; X64-AVX1-NEXT:    vpxor %xmm1, %xmm3, %xmm4
+; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
+; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; X64-AVX1-NEXT:    vxorpd %xmm1, %xmm0, %xmm3
+; X64-AVX1-NEXT:    vxorpd %xmm1, %xmm2, %xmm1
+; X64-AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
@@ -1157,27 +1153,24 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X86-AVX1-NEXT:    vmovddup {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0]
 ; X86-AVX1-NEXT:    ## xmm3 = mem[0,0]
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm2, %xmm2
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm4, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm1, %xmm5
-; X86-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; X86-AVX1-NEXT:    vxorps %xmm3, %xmm1, %xmm4
-; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; X86-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vxorps %xmm3, %xmm2, %xmm4
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; X86-AVX1-NEXT:    vxorps %xmm3, %xmm5, %xmm6
+; X86-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; X86-AVX1-NEXT:    vxorps %xmm3, %xmm0, %xmm6
+; X86-AVX1-NEXT:    vxorps %xmm3, %xmm1, %xmm7
+; X86-AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; X86-AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm1, %xmm0
+; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm1
+; X86-AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm5, %xmm2
+; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm2, %xmm4
+; X86-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm1
+; X86-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; X86-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
 ; X86-AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
 ; X86-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X86-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X86-AVX1-NEXT:    vmovd %xmm0, %eax
 ; X86-AVX1-NEXT:    vpextrd $1, %xmm0, %edx
 ; X86-AVX1-NEXT:    vzeroupper
@@ -1307,27 +1300,24 @@ define i64 @test_reduce_v8i64(<8 x i64>
 ; X64-AVX1:       ## %bb.0:
 ; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm5
-; X64-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
-; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; X64-AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm6
+; X64-AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; X64-AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm6
+; X64-AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm7
+; X64-AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; X64-AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm1, %xmm0
+; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm5, %xmm2
+; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm2, %xmm4
+; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm1
+; X64-AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
 ; X64-AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
 ; X64-AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; X64-AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; X64-AVX1-NEXT:    vmovq %xmm0, %rax
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/masked_store_trunc_ssat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_store_trunc_ssat.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_store_trunc_ssat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_store_trunc_ssat.ll Fri Jun  7 06:17:46 2019
@@ -309,37 +309,31 @@ define void @truncstore_v8i64_v8i32(<8 x
 ; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [2147483647,2147483647,2147483647,2147483647]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [2147483647,2147483647]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm6, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm5, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm6, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm8
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm9
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm10
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm7, %xmm4, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmaskmovps %ymm0, %ymm2, (%rdi)
+; AVX1-NEXT:    vmaskmovps %ymm0, %ymm8, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -706,45 +700,39 @@ define void @truncstore_v8i64_v8i16(<8 x
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm2, %xmm5
-; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm6 = [32767,32767,32767,32767]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [32767,32767]
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm9
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm9, %ymm7
-; AVX1-NEXT:    vblendvpd %ymm7, %ymm1, %ymm6, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm6, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [18446744073709518848,18446744073709518848,18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm0, %xmm9
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm9, %ymm6
-; AVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm3, %ymm0
+; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm9, %xmm9
+; AVX1-NEXT:    vpxor %xmm9, %xmm5, %xmm10
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm1, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX1-NEXT:    vblendvpd %ymm6, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [32767,32767]
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm11
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm7, %xmm12
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm7, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm7, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709518848,18446744073709518848]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm13
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm5, %xmm7, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm14
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm11, %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpackssdw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm14, %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrb $0, %xmm5, %eax
+; AVX1-NEXT:    vpextrb $0, %xmm10, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    je .LBB1_2
 ; AVX1-NEXT:  # %bb.1: # %cond.store
 ; AVX1-NEXT:    vpextrw $0, %xmm0, (%rdi)
 ; AVX1-NEXT:  .LBB1_2: # %else
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm2, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm9, %xmm1, %xmm1
 ; AVX1-NEXT:    vpextrb $4, %xmm1, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    je .LBB1_4
@@ -1291,45 +1279,39 @@ define void @truncstore_v8i64_v8i8(<8 x
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm2, %xmm5
-; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm6 = [127,127,127,127]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm9
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm9, %ymm7
-; AVX1-NEXT:    vblendvpd %ymm7, %ymm1, %ymm6, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm6, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm0, %xmm9
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm9, %ymm6
-; AVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm3, %ymm0
+; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm9, %xmm9
+; AVX1-NEXT:    vpxor %xmm9, %xmm5, %xmm10
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm1, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX1-NEXT:    vblendvpd %ymm6, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [127,127]
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm11
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm7, %xmm12
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm7, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm7, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm13
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm5, %xmm7, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm14
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm11, %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpackssdw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm14, %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrb $0, %xmm5, %eax
+; AVX1-NEXT:    vpextrb $0, %xmm10, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    je .LBB2_2
 ; AVX1-NEXT:  # %bb.1: # %cond.store
 ; AVX1-NEXT:    vpextrb $0, %xmm0, (%rdi)
 ; AVX1-NEXT:  .LBB2_2: # %else
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm2, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm9, %xmm1, %xmm1
 ; AVX1-NEXT:    vpextrb $4, %xmm1, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    je .LBB2_4
@@ -1736,21 +1718,17 @@ define void @truncstore_v4i64_v4i32(<4 x
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483647,2147483647]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vmaskmovps %xmm0, %xmm1, (%rdi)
 ; AVX1-NEXT:    vzeroupper
@@ -1984,21 +1962,17 @@ define void @truncstore_v4i64_v4i16(<4 x
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [32767,32767,32767,32767]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [32767,32767]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm5, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [18446744073709518848,18446744073709518848,18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [32767,32767]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [18446744073709518848,18446744073709518848]
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm0, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm0, %xmm6, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm2, %eax
 ; AVX1-NEXT:    testb $1, %al
@@ -2314,21 +2288,17 @@ define void @truncstore_v4i64_v4i8(<4 x
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [127,127,127,127]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm5, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm0, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm0, %xmm6, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm2, %eax
 ; AVX1-NEXT:    testb $1, %al

Modified: llvm/trunk/test/CodeGen/X86/masked_store_trunc_usat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/masked_store_trunc_usat.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/masked_store_trunc_usat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/masked_store_trunc_usat.ll Fri Jun  7 06:17:46 2019
@@ -248,30 +248,28 @@ define void @truncstore_v8i64_v8i32(<8 x
 ; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm2, %xmm2
 ; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm7, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm5, %xmm7
+; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm5 = [4294967295,4294967295]
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm4, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm5, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vmaskmovps %ymm0, %ymm2, (%rdi)
+; AVX1-NEXT:    vmaskmovps %ymm0, %ymm8, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -580,30 +578,28 @@ define void @truncstore_v8i64_v8i16(<8 x
 ; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm2, %xmm5
 ; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm9, %xmm9
-; AVX1-NEXT:    vpxor %xmm9, %xmm5, %xmm5
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm10 = [65535,65535,65535,65535]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm7, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854841343,9223372036854841343]
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm10, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm10, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm9, %xmm5, %xmm10
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm6, %xmm0, %xmm7
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854841343,9223372036854841343]
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm12
+; AVX1-NEXT:    vpxor %xmm6, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm6 = [65535,65535]
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm5, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm6, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm4, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm11, %xmm0, %xmm6, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrb $0, %xmm5, %eax
+; AVX1-NEXT:    vpextrb $0, %xmm10, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    je .LBB1_2
 ; AVX1-NEXT:  # %bb.1: # %cond.store
@@ -1092,30 +1088,28 @@ define void @truncstore_v8i64_v8i8(<8 x
 ; AVX1-NEXT:    vpxor %xmm8, %xmm8, %xmm8
 ; AVX1-NEXT:    vpcmpeqd %xmm8, %xmm2, %xmm5
 ; AVX1-NEXT:    vpcmpeqd %xmm9, %xmm9, %xmm9
-; AVX1-NEXT:    vpxor %xmm9, %xmm5, %xmm5
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm10 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm7, %xmm7
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm10, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm10, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm9, %xmm5, %xmm10
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm6, %xmm0, %xmm7
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm12
+; AVX1-NEXT:    vpxor %xmm6, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm6 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm5, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm6, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm4, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm11, %xmm0, %xmm6, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrb $0, %xmm5, %eax
+; AVX1-NEXT:    vpextrb $0, %xmm10, %eax
 ; AVX1-NEXT:    testb $1, %al
 ; AVX1-NEXT:    je .LBB2_2
 ; AVX1-NEXT:  # %bb.1: # %cond.store
@@ -1492,17 +1486,16 @@ define void @truncstore_v4i64_v4i32(<4 x
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [4294967295,4294967295]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vmaskmovps %xmm0, %xmm1, (%rdi)
 ; AVX1-NEXT:    vzeroupper
@@ -1698,17 +1691,16 @@ define void @truncstore_v4i64_v4i16(<4 x
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [65535,65535,65535,65535]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854841343,9223372036854841343]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854841343,9223372036854841343]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm5 = [65535,65535]
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm6, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm2, %eax
 ; AVX1-NEXT:    testb $1, %al
@@ -1986,17 +1978,16 @@ define void @truncstore_v4i64_v4i8(<4 x
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm3 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm5 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm6, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpextrb $0, %xmm2, %eax
 ; AVX1-NEXT:    testb $1, %al

Modified: llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll Fri Jun  7 06:17:46 2019
@@ -475,44 +475,42 @@ define <8 x i32> @vec256_i32_signed_mem_
 define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwind {
 ; AVX1-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
 ; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm6, %xmm3
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
 ; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-FALLBACK-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_reg_reg:
@@ -538,86 +536,82 @@ define <4 x i64> @vec256_i64_signed_reg_
 ;
 ; XOP-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
 ; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm3, %xmm2, %xmm4
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
 ; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
-; XOP-FALLBACK-NEXT:    vpcomltq %xmm3, %xmm2, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
 ; XOP-FALLBACK-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm6, %xmm3
 ; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm6
 ; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
 ; XOP-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
 ; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
 ; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
 ; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
 ; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
 ; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-FALLBACK-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: vec256_i64_signed_reg_reg:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT:    vpcomgtq %xmm3, %xmm2, %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
 ; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
-; XOPAVX1-NEXT:    vpcomltq %xmm3, %xmm2, %xmm3
+; XOPAVX1-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
 ; XOPAVX1-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
-; XOPAVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
-; XOPAVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm6, %xmm3
 ; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm6
 ; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
 ; XOPAVX1-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
 ; XOPAVX1-NEXT:    vpor %xmm8, %xmm4, %xmm4
 ; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
 ; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
 ; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
 ; XOPAVX1-NEXT:    vpsllq $32, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_reg_reg:
@@ -700,37 +694,27 @@ define <4 x i64> @vec256_i64_signed_reg_
 define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwind {
 ; AVX1-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
 ; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm5
 ; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
-; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm8
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm6, %ymm8, %ymm9
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm7, %xmm3
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm9, %ymm0, %ymm1, %ymm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm8
+; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm0, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm9
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm9, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm9 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm9, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpor %xmm9, %xmm6, %xmm5
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm9, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm1, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
@@ -738,11 +722,19 @@ define <4 x i64> @vec256_i64_unsigned_re
 ; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-FALLBACK-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_unsigned_reg_reg:
@@ -771,86 +763,82 @@ define <4 x i64> @vec256_i64_unsigned_re
 ;
 ; XOP-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
 ; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; XOP-FALLBACK-NEXT:    vpcomgtuq %xmm3, %xmm2, %xmm4
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtuq %xmm2, %xmm3, %xmm4
 ; XOP-FALLBACK-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
-; XOP-FALLBACK-NEXT:    vpcomltuq %xmm3, %xmm2, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomltuq %xmm2, %xmm3, %xmm6
 ; XOP-FALLBACK-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm7
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm6, %xmm3
 ; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm6
 ; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
 ; XOP-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
 ; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
 ; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
 ; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
 ; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
 ; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-FALLBACK-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: vec256_i64_unsigned_reg_reg:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT:    vpcomgtuq %xmm3, %xmm2, %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtuq %xmm2, %xmm3, %xmm4
 ; XOPAVX1-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
-; XOPAVX1-NEXT:    vpcomltuq %xmm3, %xmm2, %xmm3
+; XOPAVX1-NEXT:    vpcomltuq %xmm2, %xmm3, %xmm6
 ; XOPAVX1-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm7
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
-; XOPAVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
-; XOPAVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm6, %xmm3
 ; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
 ; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm6
 ; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
 ; XOPAVX1-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
 ; XOPAVX1-NEXT:    vpor %xmm8, %xmm4, %xmm4
 ; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
 ; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
 ; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
 ; XOPAVX1-NEXT:    vpsllq $32, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_unsigned_reg_reg:
@@ -935,46 +923,43 @@ define <4 x i64> @vec256_i64_unsigned_re
 define <4 x i64> @vec256_i64_signed_mem_reg(<4 x i64>* %a1_addr, <4 x i64> %a2) nounwind {
 ; AVX1-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
 ; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovapd (%rdi), %ymm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm8
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm3, %ymm0, %ymm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm3, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-FALLBACK-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_mem_reg:
@@ -1001,90 +986,84 @@ define <4 x i64> @vec256_i64_signed_mem_
 ;
 ; XOP-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
 ; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovapd (%rdi), %ymm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm4, %xmm2, %xmm5
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm6
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
-; XOP-FALLBACK-NEXT:    vpcomltq %xmm4, %xmm2, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomltq %xmm0, %xmm1, %xmm8
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm3, %ymm0, %ymm4
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm3, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm5
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm0, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubq %xmm7, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
 ; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
 ; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
 ; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-FALLBACK-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: vec256_i64_signed_mem_reg:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovapd (%rdi), %ymm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOPAVX1-NEXT:    vpcomgtq %xmm4, %xmm2, %xmm5
-; XOPAVX1-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm6
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
-; XOPAVX1-NEXT:    vpcomltq %xmm4, %xmm2, %xmm4
-; XOPAVX1-NEXT:    vpcomltq %xmm0, %xmm1, %xmm8
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
-; XOPAVX1-NEXT:    vblendvpd %ymm4, %ymm3, %ymm0, %ymm4
-; XOPAVX1-NEXT:    vblendvpd %ymm7, %ymm3, %ymm0, %ymm0
-; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm0, %xmm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOPAVX1-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm5
+; XOPAVX1-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpcomltq %xmm0, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsubq %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpsrlq $1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm3, %xmm3
 ; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm8, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm6, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; XOPAVX1-NEXT:    vpsllq $32, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
 ; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm0, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm0, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
 ; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_mem_reg:
@@ -1169,46 +1148,43 @@ define <4 x i64> @vec256_i64_signed_mem_
 define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, <4 x i64>* %a2_addr) nounwind {
 ; AVX1-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
 ; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm3
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm0, %ymm2, %ymm2
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
 ; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-FALLBACK-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_reg_mem:
@@ -1235,90 +1211,84 @@ define <4 x i64> @vec256_i64_signed_reg_
 ;
 ; XOP-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
 ; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm4
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm4, %xmm1, %xmm5
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm3, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
-; XOP-FALLBACK-NEXT:    vpcomltq %xmm4, %xmm1, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomltq %xmm3, %xmm0, %xmm3
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm3
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm0, %ymm2, %ymm2
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm4
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm3
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
 ; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-FALLBACK-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: vec256_i64_signed_reg_mem:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovapd (%rdi), %ymm2
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm3
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm4
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOPAVX1-NEXT:    vpcomgtq %xmm4, %xmm1, %xmm5
-; XOPAVX1-NEXT:    vpcomgtq %xmm3, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
-; XOPAVX1-NEXT:    vpcomltq %xmm4, %xmm1, %xmm4
-; XOPAVX1-NEXT:    vpcomltq %xmm3, %xmm0, %xmm3
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; XOPAVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm3
-; XOPAVX1-NEXT:    vblendvpd %ymm7, %ymm0, %ymm2, %ymm2
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm4
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm4, %xmm3
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm8, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm6, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; XOPAVX1-NEXT:    vpsllq $32, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
 ; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_reg_mem:
@@ -1403,48 +1373,44 @@ define <4 x i64> @vec256_i64_signed_reg_
 define <4 x i64> @vec256_i64_signed_mem_mem(<4 x i64>* %a1_addr, <4 x i64>* %a2_addr) nounwind {
 ; AVX1-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
 ; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
-; AVX1-FALLBACK-NEXT:    vmovapd (%rsi), %ymm3
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm4
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm5
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm2
 ; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm8
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm2, %ymm3, %ymm4
-; AVX1-FALLBACK-NEXT:    vblendvpd %ymm8, %ymm2, %ymm3, %ymm2
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm7, %xmm5
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-FALLBACK-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_mem_mem:
@@ -1472,94 +1438,86 @@ define <4 x i64> @vec256_i64_signed_mem_
 ;
 ; XOP-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
 ; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
-; XOP-FALLBACK-NEXT:    vmovapd (%rsi), %ymm3
-; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm4
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm5
+; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm2
 ; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm1
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm5, %xmm1, %xmm6
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm4, %xmm0, %xmm7
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm8
-; XOP-FALLBACK-NEXT:    vpcomltq %xmm5, %xmm1, %xmm5
-; XOP-FALLBACK-NEXT:    vpcomltq %xmm4, %xmm0, %xmm4
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm2, %ymm3, %ymm4
-; XOP-FALLBACK-NEXT:    vblendvpd %ymm8, %ymm2, %ymm3, %ymm2
-; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm3
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm7, %xmm5
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
-; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOP-FALLBACK-NEXT:    retq
 ;
 ; XOPAVX1-LABEL: vec256_i64_signed_mem_mem:
 ; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovapd (%rdi), %ymm2
-; XOPAVX1-NEXT:    vmovapd (%rsi), %ymm3
-; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm4
-; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm5
+; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm2
 ; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
-; XOPAVX1-NEXT:    vpcomgtq %xmm5, %xmm1, %xmm6
-; XOPAVX1-NEXT:    vpcomgtq %xmm4, %xmm0, %xmm7
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm8
-; XOPAVX1-NEXT:    vpcomltq %xmm5, %xmm1, %xmm5
-; XOPAVX1-NEXT:    vpcomltq %xmm4, %xmm0, %xmm4
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; XOPAVX1-NEXT:    vblendvpd %ymm4, %ymm2, %ymm3, %ymm4
-; XOPAVX1-NEXT:    vblendvpd %ymm8, %ymm2, %ymm3, %ymm2
-; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm2, %xmm3
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOPAVX1-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpcomltq %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm7, %xmm0, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsubq %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vblendvpd %xmm6, %xmm3, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
 ; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm1
 ; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm8, %xmm7, %xmm5
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
 ; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
-; XOPAVX1-NEXT:    vpsllq $32, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
-; XOPAVX1-NEXT:    vpor %xmm8, %xmm6, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm6
-; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
 ; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm5, %xmm3
+; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX1-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_mem_mem:

Modified: llvm/trunk/test/CodeGen/X86/psubus.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/psubus.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/psubus.ll (original)
+++ llvm/trunk/test/CodeGen/X86/psubus.ll Fri Jun  7 06:17:46 2019
@@ -1724,27 +1724,25 @@ define <8 x i16> @psubus_8i64_max(<8 x i
 ;
 ; AVX1-LABEL: psubus_8i64_max:
 ; AVX1:       # %bb.0: # %vector.ph
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854841343,9223372036854841343]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm6 = [65535,65535,65535,65535]
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm2, %ymm6, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm6, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm3 = [65535,65535]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [9223372036854841343,9223372036854841343]
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm3, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm3, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper

Modified: llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll Fri Jun  7 06:17:46 2019
@@ -1167,21 +1167,20 @@ define <4 x i64> @v4i64(<4 x i64> %x, <4
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vxorps %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vcmptrueps %ymm4, %ymm4, %ymm4
-; AVX1-NEXT:    vxorps %ymm4, %ymm1, %ymm4
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm5
-; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm5
-; AVX1-NEXT:    vxorps %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vxorps %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vcmptrueps %ymm5, %ymm5, %ymm5
+; AVX1-NEXT:    vxorps %ymm5, %ymm1, %ymm5
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
+; AVX1-NEXT:    vxorps %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -1389,37 +1388,35 @@ define <8 x i64> @v8i64(<8 x i64> %x, <8
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vxorps %xmm6, %xmm6, %xmm6
-; AVX1-NEXT:    vcmptrueps %ymm6, %ymm6, %ymm8
-; AVX1-NEXT:    vxorps %ymm8, %ymm2, %ymm7
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm9
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vxorps %xmm4, %xmm7, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm5, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm7, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm9
+; AVX1-NEXT:    vxorps %xmm7, %xmm7, %xmm7
+; AVX1-NEXT:    vcmptrueps %ymm7, %ymm7, %ymm10
+; AVX1-NEXT:    vxorps %ymm10, %ymm2, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm6
+; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm9, %xmm7, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm5, %xmm6, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm6
 ; AVX1-NEXT:    vpaddq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT:    vxorps %xmm4, %xmm8, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm8, %xmm0
 ; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vxorps %ymm8, %ymm3, %ymm5
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm6, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
-; AVX1-NEXT:    vxorps %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm5, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm8
+; AVX1-NEXT:    vxorps %ymm10, %ymm3, %ymm6
+; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm7
+; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm8, %xmm5, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm2, %xmm7, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
+; AVX1-NEXT:    vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vxorps %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm1, %xmm6, %xmm1
 ; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/usub_sat_vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/usub_sat_vec.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/usub_sat_vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/usub_sat_vec.ll Fri Jun  7 06:17:46 2019
@@ -1130,15 +1130,14 @@ define <4 x i64> @v4i64(<4 x i64> %x, <4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm5, %xmm2, %xmm4
+; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
 ; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -1343,31 +1342,29 @@ define <8 x i64> @v8i64(<8 x i64> %x, <8
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm5, %xmm4, %xmm8
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT:    vpxor %xmm5, %xmm7, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm8
-; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm7
-; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm6, %ymm6
-; AVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm5, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm8, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm7, %xmm4, %xmm6
 ; AVX1-NEXT:    vpsubq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm6
+; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
 ; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT:    vpxor %xmm5, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm6
-; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm4
 ; AVX1-NEXT:    vpsubq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm4
+; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-smax-widen.ll Fri Jun  7 06:17:46 2019
@@ -149,13 +149,11 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ; AVX1-LABEL: test_v4i64:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -320,20 +318,17 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v8i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -611,32 +606,27 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm11
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm5, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm9
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm10
 ; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm6, %xmm7, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm5, %xmm11, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-smax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-smax.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-smax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-smax.ll Fri Jun  7 06:17:46 2019
@@ -149,13 +149,11 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ; AVX1-LABEL: test_v4i64:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -320,20 +318,17 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v8i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -611,32 +606,27 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm11
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm5, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm9
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm10
 ; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm6, %xmm7, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm5, %xmm11, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-smin-widen.ll Fri Jun  7 06:17:46 2019
@@ -148,13 +148,11 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ; AVX1-LABEL: test_v4i64:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -321,18 +319,15 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm3, %xmm1
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -610,32 +605,27 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm7, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm11, %xmm7, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm5, %xmm1
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-smin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-smin.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-smin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-smin.ll Fri Jun  7 06:17:46 2019
@@ -148,13 +148,11 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ; AVX1-LABEL: test_v4i64:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -321,18 +319,15 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm3, %xmm1
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -610,32 +605,27 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm7, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm11, %xmm7, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm5, %xmm1
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-umax-widen.ll Fri Jun  7 06:17:46 2019
@@ -155,15 +155,13 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -333,29 +331,26 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v8i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm2
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpxor %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vxorpd %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -640,49 +635,44 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm8
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm8, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm10
+; AVX1-NEXT:    vpxor %xmm4, %xmm10, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT:    vpxor %xmm4, %xmm11, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm12
 ; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm13
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm6
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm2
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm7, %xmm11, %xmm3
+; AVX1-NEXT:    vxorpd %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm10, %xmm8, %xmm6
+; AVX1-NEXT:    vxorpd %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vxorpd %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-umax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-umax.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-umax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-umax.ll Fri Jun  7 06:17:46 2019
@@ -155,15 +155,13 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
 ; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
 ; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -333,29 +331,26 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v8i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm2
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vpxor %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vxorpd %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -640,49 +635,44 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm8
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm8, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm10
+; AVX1-NEXT:    vpxor %xmm4, %xmm10, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm11
+; AVX1-NEXT:    vpxor %xmm4, %xmm11, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm12
 ; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm13
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm6
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm2
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm7, %xmm11, %xmm3
+; AVX1-NEXT:    vxorpd %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm10, %xmm8, %xmm6
+; AVX1-NEXT:    vxorpd %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vxorpd %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-umin-widen.ll Fri Jun  7 06:17:46 2019
@@ -150,19 +150,17 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v4i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpxor %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT:    vxorpd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vxorpd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -334,27 +332,24 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vxorpd %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
 ; AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -639,49 +634,44 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm3
-; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm8
+; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm12
+; AVX1-NEXT:    vpxor %xmm4, %xmm12, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm13
+; AVX1-NEXT:    vpxor %xmm4, %xmm13, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm10, %xmm5, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vxorpd %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm12, %xmm13, %xmm7
+; AVX1-NEXT:    vxorpd %xmm4, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm7, %xmm2
+; AVX1-NEXT:    vxorpd %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-reduce-umin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-umin.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-umin.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-umin.ll Fri Jun  7 06:17:46 2019
@@ -150,19 +150,17 @@ define i64 @test_v4i64(<4 x i64> %a0) {
 ;
 ; AVX1-LABEL: test_v4i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vxorpd %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vxorpd %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpxor %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT:    vxorpd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vxorpd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -334,27 +332,24 @@ define i64 @test_v8i64(<8 x i64> %a0) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm4, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
-; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm3, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm3, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vxorpd %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm3, %xmm0, %xmm2
 ; AVX1-NEXT:    vxorpd %xmm3, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -639,49 +634,44 @@ define i64 @test_v16i64(<16 x i64> %a0)
 ;
 ; AVX1-LABEL: test_v16i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm6, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm3
-; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm8
+; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm7, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm12
+; AVX1-NEXT:    vpxor %xmm4, %xmm12, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm13
+; AVX1-NEXT:    vpxor %xmm4, %xmm13, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm10, %xmm5, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm4, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vxorpd %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm12, %xmm13, %xmm7
+; AVX1-NEXT:    vxorpd %xmm4, %xmm7, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm11, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm7, %xmm2
+; AVX1-NEXT:    vxorpd %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm0, %xmm2
 ; AVX1-NEXT:    vxorpd %xmm4, %xmm1, %xmm3
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, %rax
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll Fri Jun  7 06:17:46 2019
@@ -181,19 +181,17 @@ define <4 x i32> @trunc_packus_v4i64_v4i
 ;
 ; AVX1-LABEL: trunc_packus_v4i64_v4i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [4294967295,4294967295]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -583,30 +581,27 @@ define <8 x i32> @trunc_packus_v8i64_v8i
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [4294967295,4294967295]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [4294967295,4294967295]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -996,30 +991,27 @@ define <8 x i16> @trunc_packus_v8i64_v8i
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [65535,65535]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [65535,65535]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1631,30 +1623,27 @@ define <8 x i8> @trunc_packus_v8i64_v8i8
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -2022,30 +2011,27 @@ define void @trunc_packus_v8i64_v8i8_sto
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i8_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -2702,56 +2688,51 @@ define <16 x i8> @trunc_packus_v16i64_v1
 ;
 ; AVX1-LABEL: trunc_packus_v16i64_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm3, %ymm4, %ymm15
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm6, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm8
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm14
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm14, %xmm9
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [255,255]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm9
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm11
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm12
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm13
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm15, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm15, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm5, %xmm10
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm4
 ; AVX1-NEXT:    vpand %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpand %xmm15, %xmm6, %xmm4
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpand %xmm5, %xmm13, %xmm4
-; AVX1-NEXT:    vpand %xmm2, %xmm12, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm11, %xmm3
-; AVX1-NEXT:    vpand %xmm1, %xmm10, %xmm1
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm14, %xmm9, %xmm3
-; AVX1-NEXT:    vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm3
+; AVX1-NEXT:    vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm2
+; AVX1-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm3
+; AVX1-NEXT:    vpand %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm11, %xmm2
+; AVX1-NEXT:    vpand %xmm11, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm10, %xmm3
+; AVX1-NEXT:    vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll Fri Jun  7 06:17:46 2019
@@ -181,19 +181,17 @@ define <4 x i32> @trunc_packus_v4i64_v4i
 ;
 ; AVX1-LABEL: trunc_packus_v4i64_v4i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [4294967295,4294967295]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm0, %xmm5, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -583,30 +581,27 @@ define <8 x i32> @trunc_packus_v8i64_v8i
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [4294967295,4294967295]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [4294967295,4294967295]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -996,30 +991,27 @@ define <8 x i16> @trunc_packus_v8i64_v8i
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [65535,65535]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [65535,65535]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1628,30 +1620,27 @@ define <8 x i8> @trunc_packus_v8i64_v8i8
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -2011,30 +2000,27 @@ define void @trunc_packus_v8i64_v8i8_sto
 ;
 ; AVX1-LABEL: trunc_packus_v8i64_v8i8_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm1, %xmm5, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT:    vpand %xmm0, %xmm9, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -2689,56 +2675,51 @@ define <16 x i8> @trunc_packus_v16i64_v1
 ;
 ; AVX1-LABEL: trunc_packus_v16i64_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm3, %ymm4, %ymm15
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm6, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm8
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm14
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm14, %xmm9
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [255,255]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm9
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm11
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm12
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm13
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm15, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm15, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm5, %xmm10
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm4
 ; AVX1-NEXT:    vpand %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpand %xmm15, %xmm6, %xmm4
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpand %xmm5, %xmm13, %xmm4
-; AVX1-NEXT:    vpand %xmm2, %xmm12, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm7, %xmm11, %xmm3
-; AVX1-NEXT:    vpand %xmm1, %xmm10, %xmm1
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpand %xmm14, %xmm9, %xmm3
-; AVX1-NEXT:    vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm4
+; AVX1-NEXT:    vpand %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm3
+; AVX1-NEXT:    vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm2
+; AVX1-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm3
+; AVX1-NEXT:    vpand %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm11, %xmm2
+; AVX1-NEXT:    vpand %xmm11, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm10, %xmm3
+; AVX1-NEXT:    vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT:    vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll Fri Jun  7 06:17:46 2019
@@ -191,21 +191,17 @@ define <4 x i32> @trunc_ssat_v4i64_v4i32
 ;
 ; AVX1-LABEL: trunc_ssat_v4i64_v4i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483647,2147483647]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [2147483647,2147483647]
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -609,33 +605,27 @@ define <8 x i32> @trunc_ssat_v8i64_v8i32
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483647,2147483647]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -1025,33 +1015,27 @@ define <8 x i16> @trunc_ssat_v8i64_v8i16
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [32767,32767,32767,32767]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [32767,32767]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744073709518848,18446744073709518848,18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [32767,32767]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709518848,18446744073709518848]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1539,30 +1523,32 @@ define <8 x i8> @trunc_ssat_v8i64_v8i8(<
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [127,127,127,127]
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm8 = [127,127,127,127]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; AVX1-NEXT:    vblendvpd %ymm7, %ymm1, %ymm8, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm10, %ymm11
+; AVX1-NEXT:    vblendvpd %ymm11, %ymm0, %ymm8, %ymm8
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm11 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm8, %ymm11, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vblendvpd %ymm1, %ymm9, %ymm11, %ymm1
 ; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
 ; AVX1-NEXT:    vandpd %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
@@ -1616,6 +1602,8 @@ define <8 x i8> @trunc_ssat_v8i64_v8i8(<
   ret <8 x i8> %5
 }
 
+; TODO: The AVX1 codegen shows a missed opportunity to narrow blendv+logic to 128-bit.
+
 define void @trunc_ssat_v8i64_v8i8_store(<8 x i64> %a0, <8 x i8> *%p1) {
 ; SSE2-LABEL: trunc_ssat_v8i64_v8i8_store:
 ; SSE2:       # %bb.0:
@@ -1968,30 +1956,32 @@ define void @trunc_ssat_v8i64_v8i8_store
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i8_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [127,127,127,127]
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm8 = [127,127,127,127]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; AVX1-NEXT:    vblendvpd %ymm7, %ymm1, %ymm8, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm10, %ymm11
+; AVX1-NEXT:    vblendvpd %ymm11, %ymm0, %ymm8, %ymm8
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm11 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm8, %ymm11, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vblendvpd %ymm1, %ymm9, %ymm11, %ymm1
 ; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
 ; AVX1-NEXT:    vandpd %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
@@ -2686,61 +2676,51 @@ define <16 x i8> @trunc_ssat_v16i64_v16i
 ;
 ; AVX1-LABEL: trunc_ssat_v16i64_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [127,127,127,127]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm6, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm0, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm1, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm2, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [127,127]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm5, %xmm10
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm5, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm7, %xmm5, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm11, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm11, %xmm5, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm10, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm10, %xmm5, %xmm3
+; AVX1-NEXT:    vpackssdw %xmm8, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpackssdw %xmm7, %xmm3, %xmm2
+; AVX1-NEXT:    vpackssdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll Fri Jun  7 06:17:46 2019
@@ -191,21 +191,17 @@ define <4 x i32> @trunc_ssat_v4i64_v4i32
 ;
 ; AVX1-LABEL: trunc_ssat_v4i64_v4i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483647,2147483647]
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm3
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [2147483647,2147483647]
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -609,33 +605,27 @@ define <8 x i32> @trunc_ssat_v8i64_v8i32
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [2147483647,2147483647]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -1025,33 +1015,27 @@ define <8 x i16> @trunc_ssat_v8i64_v8i16
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [32767,32767,32767,32767]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [32767,32767]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744073709518848,18446744073709518848,18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709518848,18446744073709518848]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [32767,32767]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709518848,18446744073709518848]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1521,33 +1505,27 @@ define <8 x i8> @trunc_ssat_v8i64_v8i8(<
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [127,127,127,127]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [127,127]
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm9, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1587,6 +1565,8 @@ define <8 x i8> @trunc_ssat_v8i64_v8i8(<
   ret <8 x i8> %5
 }
 
+; TODO: The AVX1 codegen shows a missed opportunity to narrow blendv+logic to 128-bit.
+
 define void @trunc_ssat_v8i64_v8i8_store(<8 x i64> %a0, <8 x i8> *%p1) {
 ; SSE2-LABEL: trunc_ssat_v8i64_v8i8_store:
 ; SSE2:       # %bb.0:
@@ -1939,30 +1919,32 @@ define void @trunc_ssat_v8i64_v8i8_store
 ;
 ; AVX1-LABEL: trunc_ssat_v8i64_v8i8_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [127,127,127,127]
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm8 = [127,127,127,127]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm6
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; AVX1-NEXT:    vblendvpd %ymm7, %ymm1, %ymm8, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm10, %ymm11
+; AVX1-NEXT:    vblendvpd %ymm11, %ymm0, %ymm8, %ymm8
+; AVX1-NEXT:    vmovapd {{.*#+}} ymm11 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vblendvpd %ymm0, %ymm8, %ymm11, %ymm0
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vblendvpd %ymm1, %ymm9, %ymm11, %ymm1
 ; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
 ; AVX1-NEXT:    vandpd %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
@@ -2655,61 +2637,51 @@ define <16 x i8> @trunc_ssat_v16i64_v16i
 ;
 ; AVX1-LABEL: trunc_ssat_v16i64_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [127,127,127,127]
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm6, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm6, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm4 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [18446744073709551488,18446744073709551488]
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm0, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm1, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm2, %xmm7
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm3, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT:    vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [127,127]
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm0, %xmm5, %xmm10
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm4, %xmm5, %xmm11
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm7, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm9, %xmm5, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm3, %xmm5, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm8, %xmm5, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm3, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm5, %xmm8
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm0, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm7, %xmm5, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm2, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm11, %xmm7
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm11, %xmm5, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm10, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm3, %xmm10, %xmm5, %xmm3
+; AVX1-NEXT:    vpackssdw %xmm8, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpackssdw %xmm7, %xmm3, %xmm2
+; AVX1-NEXT:    vpackssdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll Fri Jun  7 06:17:46 2019
@@ -112,17 +112,17 @@ define <4 x i32> @trunc_usat_v4i64_v4i32
 ;
 ; AVX1-LABEL: trunc_usat_v4i64_v4i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,429496729]
-; AVX1-NEXT:    vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpxor %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm3 = [4294967295,429496729]
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm4, %xmm3, %xmm1
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm3 = [4294967295,4294967295]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -380,26 +380,24 @@ define <8 x i32> @trunc_usat_v8i64_v8i32
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [4294967295,4294967295]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -648,26 +646,24 @@ define <8 x i16> @trunc_usat_v8i64_v8i16
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854841343,9223372036854841343]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854841343,9223372036854841343]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [65535,65535]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1134,26 +1130,24 @@ define <8 x i8> @trunc_usat_v8i64_v8i8(<
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -1381,26 +1375,24 @@ define void @trunc_usat_v8i64_v8i8_store
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i8_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -1785,48 +1777,49 @@ define <16 x i8> @trunc_usat_v16i64_v16i
 ;
 ; AVX1-LABEL: trunc_usat_v16i64_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm8 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vpxor %xmm6, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm8, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm8, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm2, %ymm8, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm3, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm3, %ymm8, %ymm3
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa %ymm0, %ymm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm5, %xmm8, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm11
+; AVX1-NEXT:    vpxor %xmm5, %xmm11, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm14
+; AVX1-NEXT:    vpxor %xmm5, %xmm14, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm12
+; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm13
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm5, %xmm7, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm15
+; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm6 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm6, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm15, %xmm7, %xmm6, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm14, %xmm6, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm11, %xmm6, %xmm7
+; AVX1-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm8, %xmm6, %xmm6
+; AVX1-NEXT:    vpackusdw %xmm9, %xmm3, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpackusdw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm7, %xmm6, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

Modified: llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll?rev=362797&r1=362796&r2=362797&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll Fri Jun  7 06:17:46 2019
@@ -112,17 +112,17 @@ define <4 x i32> @trunc_usat_v4i64_v4i32
 ;
 ; AVX1-LABEL: trunc_usat_v4i64_v4i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm2
 ; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,429496729]
-; AVX1-NEXT:    vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpxor %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm3 = [4294967295,429496729]
+; AVX1-NEXT:    vblendvpd %xmm1, %xmm4, %xmm3, %xmm1
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm3 = [4294967295,4294967295]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
@@ -380,26 +380,24 @@ define <8 x i32> @trunc_usat_v8i64_v8i32
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [4294967295,4294967295]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
@@ -648,26 +646,24 @@ define <8 x i16> @trunc_usat_v8i64_v8i16
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854841343,9223372036854841343]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854841343,9223372036854841343]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [65535,65535]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1129,26 +1125,24 @@ define <8 x i8> @trunc_usat_v8i64_v8i8(<
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
@@ -1368,26 +1362,24 @@ define void @trunc_usat_v8i64_v8i8_store
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i8_store:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT:    vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackuswb %xmm0, %xmm0, %xmm0
@@ -1770,48 +1762,49 @@ define <16 x i8> @trunc_usat_v16i64_v16i
 ;
 ; AVX1-LABEL: trunc_usat_v16i64_v16i8:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovapd {{.*#+}} ymm8 = [255,255,255,255]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vpxor %xmm6, %xmm0, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm0, %ymm8, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm1, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm1, %ymm8, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm2, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm2, %ymm8, %ymm2
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpxor %xmm6, %xmm3, %xmm5
-; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT:    vblendvpd %ymm4, %ymm3, %ymm8, %ymm3
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT:    vpackusdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT:    vmovdqa %ymm0, %ymm8
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm5, %xmm8, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm11
+; AVX1-NEXT:    vpxor %xmm5, %xmm11, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm0
+; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT:    vpxor %xmm5, %xmm1, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm14
+; AVX1-NEXT:    vpxor %xmm5, %xmm14, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm12
+; AVX1-NEXT:    vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT:    vpcmpgtq %xmm7, %xmm6, %xmm13
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT:    vpxor %xmm5, %xmm7, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm15
+; AVX1-NEXT:    vpxor %xmm5, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT:    vpxor %xmm5, %xmm0, %xmm5
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vmovapd {{.*#+}} xmm6 = [255,255]
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm0, %xmm6, %xmm9
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm3, %xmm6, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm15, %xmm7, %xmm6, %xmm4
+; AVX1-NEXT:    vblendvpd %xmm13, %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm12, %xmm14, %xmm6, %xmm5
+; AVX1-NEXT:    vblendvpd %xmm10, %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm11, %xmm6, %xmm7
+; AVX1-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm8, %xmm6, %xmm6
+; AVX1-NEXT:    vpackusdw %xmm9, %xmm3, %xmm0
 ; AVX1-NEXT:    vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpackusdw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpackusdw %xmm7, %xmm6, %xmm2
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;

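An illustrative sketch of the pattern this fold targets (not taken from the committed
tests; the function name, exact flags, and expected codegen here are assumptions). On
AVX1, the 256-bit compare below is legalized into two 128-bit compares whose results
are concatenated to feed the select, and the high half of the select result is then
extracted -- which is the extract-subvector-of-vselect shape the new combine narrows:

  define <2 x i64> @extract_hi_of_select(<4 x i64> %x, <4 x i64> %y) {
    ; 256-bit signed max: keep the larger element from %x or %y
    %c = icmp sgt <4 x i64> %x, %y
    %s = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %y
    ; extract the high 128-bit subvector of the 256-bit select result
    %hi = shufflevector <4 x i64> %s, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
    ret <2 x i64> %hi
  }

Compiled with something like "llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx", a
function of this kind should now be able to blend on the relevant 128-bit half
directly instead of materializing a 256-bit vblendvpd and extracting from it.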


