[llvm] 156913c - Revert "[X86] Fold BITOP(PACKSS(X,Z),PACKSS(Y,W)) --> PACKSS(BITOP(X,Y),BITOP(Z,W))"

Arthur Eubanks via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 5 14:53:04 PDT 2023


Author: Arthur Eubanks
Date: 2023-07-05T14:52:57-07:00
New Revision: 156913cb776438f87bd1580de862eac7be79ca2a

URL: https://github.com/llvm/llvm-project/commit/156913cb776438f87bd1580de862eac7be79ca2a
DIFF: https://github.com/llvm/llvm-project/commit/156913cb776438f87bd1580de862eac7be79ca2a.diff

LOG: Revert "[X86] Fold BITOP(PACKSS(X,Z),PACKSS(Y,W)) --> PACKSS(BITOP(X,Y),BITOP(Z,W))"

This reverts commit a32d14fd4c0a43c154f251df1ccfe57e8b0a711a.

Causes crashes; see https://reviews.llvm.org/rGa32d14fd4c0a43c154f251df1ccfe57e8b0a711a.
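
For context, the reverted combine rewrote a bit operation of two sign-packed
values as a single pack of bit operations, roughly as follows (illustrative
SelectionDAG pseudo-nodes, not output from the patch; shown with AND, but OR
and XOR were handled the same way):

  ; Before the fold: the bit op consumes two packs.
  t0 = PACKSS X, Z
  t1 = PACKSS Y, W
  t2 = AND t0, t1
  ; After the fold: the bit op is applied to the wider sources and only one
  ; pack remains. This is bit-exact only because the fold required every
  ; pack input to be known all-sign-bits, so the PACKSS saturation is a
  ; no-op.
  t2 = PACKSS (AND X, Y), (AND Z, W)

The removed implementation, combineBitOpWithPACK, appears in the diff below.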

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
    llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
    llvm/test/CodeGen/X86/bitcast-vector-bool.ll
    llvm/test/CodeGen/X86/cast-vsel.ll
    llvm/test/CodeGen/X86/movmsk-cmp.ll
    llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
    llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7c687fbb23e2bb..c1f804c88acfb7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -50172,47 +50172,6 @@ static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
   return SDValue();
 }
 
-// Attempt to fold:
-// BITOP(PACKSS(X,Z),PACKSS(Y,W)) --> PACKSS(BITOP(X,Y),BITOP(Z,W)).
-// TODO: Add PACKUS handling.
-static SDValue combineBitOpWithPACK(SDNode *N, SelectionDAG &DAG) {
-  unsigned Opc = N->getOpcode();
-  assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
-         "Unexpected bit opcode");
-
-  SDValue N0 = N->getOperand(0);
-  SDValue N1 = N->getOperand(1);
-  EVT VT = N->getValueType(0);
-
-  // Both operands must be single use.
-  if (!N0.hasOneUse() || !N1.hasOneUse())
-    return SDValue();
-
-  // Search for matching packs.
-  N0 = peekThroughOneUseBitcasts(N0);
-  N1 = peekThroughOneUseBitcasts(N1);
-
-  if (N0.getOpcode() != X86ISD::PACKSS || N1.getOpcode() != X86ISD::PACKSS)
-    return SDValue();
-  if (N0.getSimpleValueType() != N1.getSimpleValueType())
-    return SDValue();
-
-  MVT SrcVT = N0.getOperand(0).getSimpleValueType();
-  unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
-
-  // Limit to allsignbits packing.
-  if (DAG.ComputeNumSignBits(N0.getOperand(0)) != NumSrcBits ||
-      DAG.ComputeNumSignBits(N0.getOperand(1)) != NumSrcBits ||
-      DAG.ComputeNumSignBits(N1.getOperand(0)) != NumSrcBits ||
-      DAG.ComputeNumSignBits(N1.getOperand(1)) != NumSrcBits)
-    return SDValue();
-
-  SDLoc DL(N);
-  SDValue LHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(0), N1.getOperand(0));
-  SDValue RHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(1), N1.getOperand(1));
-  return DAG.getNode(X86ISD::PACKSS, DL, VT, LHS, RHS);
-}
-
 /// If this is a zero/all-bits result that is bitwise-anded with a low bits
 /// mask. (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
 /// with a shift-right to eliminate loading the vector constant mask value.
@@ -50637,9 +50596,6 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
   if (SDValue R = combineBitOpWithShift(N, DAG))
     return R;
 
-  if (SDValue R = combineBitOpWithPACK(N, DAG))
-    return R;
-
   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
     return FPLogic;
 
@@ -51397,9 +51353,6 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
   if (SDValue R = combineBitOpWithShift(N, DAG))
     return R;
 
-  if (SDValue R = combineBitOpWithPACK(N, DAG))
-    return R;
-
   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
     return FPLogic;
 
@@ -53936,9 +53889,6 @@ static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
   if (SDValue R = combineBitOpWithShift(N, DAG))
     return R;
 
-  if (SDValue R = combineBitOpWithPACK(N, DAG))
-    return R;
-
   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
     return FPLogic;
 

diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
index 2f7f9c554c0ef5..26dea5d386b8db 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -150,13 +150,13 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
 define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 ; SSE2-SSSE3-LABEL: v16i16:
 ; SSE2-SSSE3:       # %bb.0:
-; SSE2-SSSE3-NEXT:    pcmpgtw %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtw %xmm3, %xmm1
-; SSE2-SSSE3-NEXT:    pcmpgtw %xmm6, %xmm4
-; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm4
+; SSE2-SSSE3-NEXT:    pcmpgtw %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    packsswb %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtw %xmm7, %xmm5
-; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtw %xmm6, %xmm4
 ; SSE2-SSSE3-NEXT:    packsswb %xmm5, %xmm4
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm4
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm4, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE2-SSSE3-NEXT:    retq
@@ -221,13 +221,13 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 define i8 @v8i32_and(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; SSE2-SSSE3-LABEL: v8i32_and:
 ; SSE2-SSSE3:       # %bb.0:
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm1
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm6, %xmm4
-; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm4
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm7, %xmm5
-; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm6, %xmm4
 ; SSE2-SSSE3-NEXT:    packssdw %xmm5, %xmm4
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm4
 ; SSE2-SSSE3-NEXT:    packsswb %xmm4, %xmm4
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm4, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
@@ -290,13 +290,13 @@ define i8 @v8i32_and(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 define i8 @v8i32_or(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; SSE2-SSSE3-LABEL: v8i32_or:
 ; SSE2-SSSE3:       # %bb.0:
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm1
-; SSE2-SSSE3-NEXT:    pcmpgtd %xmm6, %xmm4
-; SSE2-SSSE3-NEXT:    por %xmm0, %xmm4
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm7, %xmm5
-; SSE2-SSSE3-NEXT:    por %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm6, %xmm4
 ; SSE2-SSSE3-NEXT:    packssdw %xmm5, %xmm4
+; SSE2-SSSE3-NEXT:    por %xmm0, %xmm4
 ; SSE2-SSSE3-NEXT:    packsswb %xmm4, %xmm4
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm4, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
@@ -365,17 +365,17 @@ define i8 @v8i32_or_and(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d,
 ; SSE2-SSSE3-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm9
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm5, %xmm7
-; SSE2-SSSE3-NEXT:    por %xmm1, %xmm7
 ; SSE2-SSSE3-NEXT:    pcmpgtd %xmm4, %xmm6
+; SSE2-SSSE3-NEXT:    packssdw %xmm7, %xmm6
 ; SSE2-SSSE3-NEXT:    por %xmm0, %xmm6
 ; SSE2-SSSE3-NEXT:    pcmpeqd {{[0-9]+}}(%rsp), %xmm9
-; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm9
 ; SSE2-SSSE3-NEXT:    pcmpeqd {{[0-9]+}}(%rsp), %xmm8
-; SSE2-SSSE3-NEXT:    pand %xmm7, %xmm8
-; SSE2-SSSE3-NEXT:    packssdw %xmm8, %xmm9
-; SSE2-SSSE3-NEXT:    packsswb %xmm9, %xmm9
-; SSE2-SSSE3-NEXT:    pmovmskb %xmm9, %eax
+; SSE2-SSSE3-NEXT:    packssdw %xmm9, %xmm8
+; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm8
+; SSE2-SSSE3-NEXT:    packsswb %xmm8, %xmm8
+; SSE2-SSSE3-NEXT:    pmovmskb %xmm8, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
 ; SSE2-SSSE3-NEXT:    retq
 ;
@@ -630,13 +630,13 @@ define i8 @v8i32_or_select(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32
 define i8 @v8f32_and(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
 ; SSE2-SSSE3-LABEL: v8f32_and:
 ; SSE2-SSSE3:       # %bb.0:
-; SSE2-SSSE3-NEXT:    cmpltps %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT:    cmpltps %xmm1, %xmm3
-; SSE2-SSSE3-NEXT:    cmpltps %xmm4, %xmm6
-; SSE2-SSSE3-NEXT:    andps %xmm2, %xmm6
+; SSE2-SSSE3-NEXT:    cmpltps %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    packssdw %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT:    cmpltps %xmm5, %xmm7
-; SSE2-SSSE3-NEXT:    andps %xmm3, %xmm7
+; SSE2-SSSE3-NEXT:    cmpltps %xmm4, %xmm6
 ; SSE2-SSSE3-NEXT:    packssdw %xmm7, %xmm6
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm6
 ; SSE2-SSSE3-NEXT:    packsswb %xmm6, %xmm6
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm6, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
@@ -681,13 +681,13 @@ define i8 @v8f32_and(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float>
 define i8 @v8f32_xor(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
 ; SSE2-SSSE3-LABEL: v8f32_xor:
 ; SSE2-SSSE3:       # %bb.0:
-; SSE2-SSSE3-NEXT:    cmpltps %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT:    cmpltps %xmm1, %xmm3
-; SSE2-SSSE3-NEXT:    cmpltps %xmm4, %xmm6
-; SSE2-SSSE3-NEXT:    xorps %xmm2, %xmm6
+; SSE2-SSSE3-NEXT:    cmpltps %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    packssdw %xmm3, %xmm2
 ; SSE2-SSSE3-NEXT:    cmpltps %xmm5, %xmm7
-; SSE2-SSSE3-NEXT:    xorps %xmm3, %xmm7
+; SSE2-SSSE3-NEXT:    cmpltps %xmm4, %xmm6
 ; SSE2-SSSE3-NEXT:    packssdw %xmm7, %xmm6
+; SSE2-SSSE3-NEXT:    pxor %xmm2, %xmm6
 ; SSE2-SSSE3-NEXT:    packsswb %xmm6, %xmm6
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm6, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax
@@ -734,25 +734,25 @@ define i8 @v8f32_xor(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float>
 define i8 @v8f32_xor_and(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d, <8 x float> %e, <8 x float> %f) {
 ; SSE2-SSSE3-LABEL: v8f32_xor_and:
 ; SSE2-SSSE3:       # %bb.0:
-; SSE2-SSSE3-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm9
 ; SSE2-SSSE3-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm8
+; SSE2-SSSE3-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm9
 ; SSE2-SSSE3-NEXT:    cmpnleps %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT:    cmpnleps %xmm2, %xmm0
-; SSE2-SSSE3-NEXT:    movaps %xmm5, %xmm2
-; SSE2-SSSE3-NEXT:    cmpeqps %xmm7, %xmm2
+; SSE2-SSSE3-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    movaps %xmm5, %xmm1
+; SSE2-SSSE3-NEXT:    cmpeqps %xmm7, %xmm1
 ; SSE2-SSSE3-NEXT:    cmpunordps %xmm7, %xmm5
-; SSE2-SSSE3-NEXT:    orps %xmm2, %xmm5
-; SSE2-SSSE3-NEXT:    xorps %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    orps %xmm1, %xmm5
 ; SSE2-SSSE3-NEXT:    movaps %xmm4, %xmm1
 ; SSE2-SSSE3-NEXT:    cmpeqps %xmm6, %xmm1
 ; SSE2-SSSE3-NEXT:    cmpunordps %xmm6, %xmm4
 ; SSE2-SSSE3-NEXT:    orps %xmm1, %xmm4
-; SSE2-SSSE3-NEXT:    xorps %xmm0, %xmm4
-; SSE2-SSSE3-NEXT:    cmpltps {{[0-9]+}}(%rsp), %xmm8
-; SSE2-SSSE3-NEXT:    andps %xmm4, %xmm8
+; SSE2-SSSE3-NEXT:    packssdw %xmm5, %xmm4
+; SSE2-SSSE3-NEXT:    pxor %xmm0, %xmm4
 ; SSE2-SSSE3-NEXT:    cmpltps {{[0-9]+}}(%rsp), %xmm9
-; SSE2-SSSE3-NEXT:    andps %xmm5, %xmm9
+; SSE2-SSSE3-NEXT:    cmpltps {{[0-9]+}}(%rsp), %xmm8
 ; SSE2-SSSE3-NEXT:    packssdw %xmm9, %xmm8
+; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm8
 ; SSE2-SSSE3-NEXT:    packsswb %xmm8, %xmm8
 ; SSE2-SSSE3-NEXT:    pmovmskb %xmm8, %eax
 ; SSE2-SSSE3-NEXT:    # kill: def $al killed $al killed $eax

diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
index 4816615db255ce..fde72b4cc08b3f 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -65,12 +65,12 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
 ; AVX2-LABEL: v8i64:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpcmpgtq %ymm7, %ymm5, %ymm5
+; AVX2-NEXT:    vpcmpgtq %ymm6, %ymm4, %ymm4
+; AVX2-NEXT:    vpackssdw %ymm5, %ymm4, %ymm4
 ; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm1, %ymm1
-; AVX2-NEXT:    vpand %ymm5, %ymm1, %ymm1
-; AVX2-NEXT:    vpcmpgtq %ymm6, %ymm4, %ymm3
 ; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpand %ymm3, %ymm0, %ymm0
 ; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; AVX2-NEXT:    vmovmskps %ymm0, %eax
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
@@ -153,12 +153,12 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
 ; AVX2-LABEL: v8f64:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vcmpltpd %ymm5, %ymm7, %ymm5
+; AVX2-NEXT:    vcmpltpd %ymm4, %ymm6, %ymm4
+; AVX2-NEXT:    vpackssdw %ymm5, %ymm4, %ymm4
 ; AVX2-NEXT:    vcmpltpd %ymm1, %ymm3, %ymm1
-; AVX2-NEXT:    vandpd %ymm5, %ymm1, %ymm1
-; AVX2-NEXT:    vcmpltpd %ymm4, %ymm6, %ymm3
 ; AVX2-NEXT:    vcmpltpd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    vandpd %ymm3, %ymm0, %ymm0
 ; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; AVX2-NEXT:    vmovmskps %ymm0, %eax
 ; AVX2-NEXT:    # kill: def $al killed $al killed $eax
@@ -196,50 +196,50 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm9
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm10
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
 ; SSE-NEXT:    pcmpgtw %xmm5, %xmm1
-; SSE-NEXT:    pcmpgtw %xmm6, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
+; SSE-NEXT:    packsswb %xmm1, %xmm0
 ; SSE-NEXT:    pcmpgtw %xmm7, %xmm3
+; SSE-NEXT:    pcmpgtw %xmm6, %xmm2
+; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    pcmpgtw {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    pand %xmm0, %xmm11
 ; SSE-NEXT:    pcmpgtw {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT:    pand %xmm1, %xmm10
-; SSE-NEXT:    packsswb %xmm10, %xmm11
+; SSE-NEXT:    packsswb %xmm11, %xmm10
+; SSE-NEXT:    pand %xmm0, %xmm10
 ; SSE-NEXT:    pcmpgtw {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT:    pand %xmm2, %xmm9
 ; SSE-NEXT:    pcmpgtw {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT:    pand %xmm3, %xmm8
-; SSE-NEXT:    packsswb %xmm8, %xmm9
-; SSE-NEXT:    pmovmskb %xmm11, %ecx
-; SSE-NEXT:    pmovmskb %xmm9, %eax
+; SSE-NEXT:    packsswb %xmm9, %xmm8
+; SSE-NEXT:    pand %xmm2, %xmm8
+; SSE-NEXT:    pmovmskb %xmm10, %ecx
+; SSE-NEXT:    pmovmskb %xmm8, %eax
 ; SSE-NEXT:    shll $16, %eax
 ; SSE-NEXT:    orl %ecx, %eax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: v32i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm1, %xmm8
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT:    vpcmpgtw %xmm8, %xmm9, %xmm8
 ; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm8, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm8
+; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm8, %xmm3
 ; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtw %xmm7, %xmm5, %xmm2
-; AVX1-NEXT:    vpand %xmm2, %xmm8, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm7
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm5
-; AVX1-NEXT:    vpcmpgtw %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vpand %xmm5, %xmm1, %xmm1
-; AVX1-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vpcmpgtw %xmm6, %xmm4, %xmm2
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; AVX1-NEXT:    vpcmpgtw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtw %xmm7, %xmm5, %xmm3
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtw %xmm6, %xmm4, %xmm3
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %ecx
 ; AVX1-NEXT:    vpmovmskb %xmm1, %eax
 ; AVX1-NEXT:    shll $16, %eax
@@ -249,13 +249,13 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
 ;
 ; AVX2-LABEL: v32i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT:    vpcmpgtw %ymm6, %ymm4, %ymm2
-; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpcmpgtw %ymm7, %ymm5, %ymm2
-; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpgtw %ymm7, %ymm5, %ymm1
+; AVX2-NEXT:    vpcmpgtw %ymm6, %ymm4, %ymm2
+; AVX2-NEXT:    vpacksswb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; AVX2-NEXT:    vzeroupper
@@ -305,50 +305,50 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm9
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm10
 ; SSE-NEXT:    movdqa {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
-; SSE-NEXT:    pcmpgtd %xmm5, %xmm1
-; SSE-NEXT:    pcmpgtd %xmm6, %xmm2
 ; SSE-NEXT:    pcmpgtd %xmm7, %xmm3
+; SSE-NEXT:    pcmpgtd %xmm6, %xmm2
+; SSE-NEXT:    packssdw %xmm3, %xmm2
+; SSE-NEXT:    pcmpgtd %xmm5, %xmm1
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    packsswb %xmm2, %xmm0
 ; SSE-NEXT:    pcmpgtd {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    pand %xmm0, %xmm11
 ; SSE-NEXT:    pcmpgtd {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT:    pand %xmm1, %xmm10
-; SSE-NEXT:    packssdw %xmm10, %xmm11
+; SSE-NEXT:    packssdw %xmm11, %xmm10
 ; SSE-NEXT:    pcmpgtd {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT:    pand %xmm2, %xmm9
 ; SSE-NEXT:    pcmpgtd {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT:    pand %xmm3, %xmm8
-; SSE-NEXT:    packssdw %xmm8, %xmm9
-; SSE-NEXT:    packsswb %xmm9, %xmm11
-; SSE-NEXT:    pmovmskb %xmm11, %eax
+; SSE-NEXT:    packssdw %xmm9, %xmm8
+; SSE-NEXT:    packsswb %xmm10, %xmm8
+; SSE-NEXT:    pand %xmm0, %xmm8
+; SSE-NEXT:    pmovmskb %xmm8, %eax
 ; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: v16i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm8
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT:    vpcmpgtd %xmm8, %xmm9, %xmm8
 ; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm4, %xmm3
-; AVX1-NEXT:    vpand %xmm3, %xmm8, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm6
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
-; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpcmpgtd %xmm7, %xmm5, %xmm3
-; AVX1-NEXT:    vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm4
-; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vpackssdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpackssdw %xmm8, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm8
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm8, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm2
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtd %xmm7, %xmm5, %xmm2
+; AVX1-NEXT:    vpackssdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtd %xmm6, %xmm4, %xmm3
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX1-NEXT:    vzeroupper
@@ -356,21 +356,21 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
 ;
 ; AVX2-LABEL: v16i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT:    vpcmpgtd %ymm6, %ymm4, %ymm4
-; AVX2-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX2-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtd %ymm7, %ymm5, %ymm5
-; AVX2-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX2-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX2-NEXT:    vpand %xmm5, %xmm1, %xmm1
 ; AVX2-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; AVX2-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtd %ymm7, %ymm5, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpgtd %ymm6, %ymm4, %ymm2
+; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX2-NEXT:    vzeroupper
@@ -407,68 +407,46 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
 ; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm9
 ; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm10
 ; SSE-NEXT:    movaps {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    cmpltps %xmm0, %xmm4
-; SSE-NEXT:    cmpltps %xmm1, %xmm5
-; SSE-NEXT:    cmpltps %xmm2, %xmm6
 ; SSE-NEXT:    cmpltps %xmm3, %xmm7
+; SSE-NEXT:    cmpltps %xmm2, %xmm6
+; SSE-NEXT:    packssdw %xmm7, %xmm6
+; SSE-NEXT:    cmpltps %xmm1, %xmm5
+; SSE-NEXT:    cmpltps %xmm0, %xmm4
+; SSE-NEXT:    packssdw %xmm5, %xmm4
+; SSE-NEXT:    packsswb %xmm6, %xmm4
 ; SSE-NEXT:    cmpltps {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT:    andps %xmm4, %xmm11
 ; SSE-NEXT:    cmpltps {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT:    andps %xmm5, %xmm10
-; SSE-NEXT:    packssdw %xmm10, %xmm11
+; SSE-NEXT:    packssdw %xmm11, %xmm10
 ; SSE-NEXT:    cmpltps {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT:    andps %xmm6, %xmm9
 ; SSE-NEXT:    cmpltps {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT:    andps %xmm7, %xmm8
-; SSE-NEXT:    packssdw %xmm8, %xmm9
-; SSE-NEXT:    packsswb %xmm9, %xmm11
-; SSE-NEXT:    pmovmskb %xmm11, %eax
+; SSE-NEXT:    packssdw %xmm9, %xmm8
+; SSE-NEXT:    packsswb %xmm10, %xmm8
+; SSE-NEXT:    pand %xmm4, %xmm8
+; SSE-NEXT:    pmovmskb %xmm8, %eax
 ; SSE-NEXT:    # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: v16f32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vcmpltps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vcmpltps %ymm4, %ymm6, %ymm4
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm6
-; AVX1-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX1-NEXT:    vcmpltps %ymm5, %ymm7, %ymm5
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT:    vandps %xmm5, %xmm1, %xmm1
-; AVX1-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vandps %xmm4, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovmskb %xmm0, %eax
-; AVX1-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX1-NEXT:    vzeroupper
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: v16f32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX2-NEXT:    vcmpltps %ymm1, %ymm3, %ymm1
-; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX2-NEXT:    vcmpltps %ymm4, %ymm6, %ymm4
-; AVX2-NEXT:    vextractf128 $1, %ymm4, %xmm6
-; AVX2-NEXT:    vandps %xmm6, %xmm2, %xmm2
-; AVX2-NEXT:    vcmpltps %ymm5, %ymm7, %ymm5
-; AVX2-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX2-NEXT:    vandps %xmm6, %xmm3, %xmm3
-; AVX2-NEXT:    vandps %xmm5, %xmm1, %xmm1
-; AVX2-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vandps %xmm4, %xmm0, %xmm0
-; AVX2-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovmskb %xmm0, %eax
-; AVX2-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
+; AVX12-LABEL: v16f32:
+; AVX12:       # %bb.0:
+; AVX12-NEXT:    vcmpltps %ymm1, %ymm3, %ymm1
+; AVX12-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX12-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
+; AVX12-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
+; AVX12-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX12-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
+; AVX12-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vcmpltps %ymm5, %ymm7, %ymm1
+; AVX12-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX12-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX12-NEXT:    vcmpltps %ymm4, %ymm6, %ymm2
+; AVX12-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX12-NEXT:    vpackssdw %xmm3, %xmm2, %xmm2
+; AVX12-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpmovmskb %xmm0, %eax
+; AVX12-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX12-NEXT:    vzeroupper
+; AVX12-NEXT:    retq
 ;
 ; AVX512F-LABEL: v16f32:
 ; AVX512F:       # %bb.0:
@@ -607,5 +585,3 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
   %res = bitcast <64 x i1> %y to i64
   ret i64 %res
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; AVX12: {{.*}}

diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index 042368c1f310d2..a6aa52db2c1658 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -1468,18 +1468,18 @@ define [2 x i8] @PR58546(<16 x float> %a0) {
 define i8 @PR59526(<8 x i32> %a, <8 x i32> %b, ptr %mask) {
 ; SSE-LABEL: PR59526:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
 ; SSE-NEXT:    pcmpeqd %xmm3, %xmm1
-; SSE-NEXT:    movdqu (%rdi), %xmm2
-; SSE-NEXT:    movdqu 16(%rdi), %xmm3
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    movdqu (%rdi), %xmm1
+; SSE-NEXT:    movdqu 16(%rdi), %xmm2
+; SSE-NEXT:    pxor %xmm3, %xmm3
 ; SSE-NEXT:    pxor %xmm4, %xmm4
-; SSE-NEXT:    pxor %xmm5, %xmm5
-; SSE-NEXT:    pcmpgtd %xmm2, %xmm5
-; SSE-NEXT:    pand %xmm0, %xmm5
-; SSE-NEXT:    pcmpgtd %xmm3, %xmm4
-; SSE-NEXT:    pand %xmm1, %xmm4
-; SSE-NEXT:    packssdw %xmm4, %xmm5
-; SSE-NEXT:    pmovmskb %xmm5, %eax
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm4
+; SSE-NEXT:    pcmpgtd %xmm1, %xmm3
+; SSE-NEXT:    packssdw %xmm4, %xmm3
+; SSE-NEXT:    pand %xmm0, %xmm3
+; SSE-NEXT:    pmovmskb %xmm3, %eax
 ; SSE-NEXT:    testl %eax, %eax
 ; SSE-NEXT:    setne %al
 ; SSE-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/cast-vsel.ll b/llvm/test/CodeGen/X86/cast-vsel.ll
index fb0470fd3b4559..2fd7b34eceec95 100644
--- a/llvm/test/CodeGen/X86/cast-vsel.ll
+++ b/llvm/test/CodeGen/X86/cast-vsel.ll
@@ -278,24 +278,28 @@ define dso_local void @example25() nounwind {
 ; SSE2-LABEL: example25:
 ; SSE2:       # %bb.0: # %vector.ph
 ; SSE2-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE2-NEXT:    movaps {{.*#+}} xmm0 = [1,1,1,1]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
 ; SSE2-NEXT:    .p2align 4, 0x90
 ; SSE2-NEXT:  .LBB5_1: # %vector.body
 ; SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT:    movaps da+4112(%rax), %xmm1
-; SSE2-NEXT:    movaps da+4096(%rax), %xmm2
-; SSE2-NEXT:    cmpltps db+4096(%rax), %xmm2
-; SSE2-NEXT:    cmpltps db+4112(%rax), %xmm1
+; SSE2-NEXT:    movaps da+4096(%rax), %xmm1
+; SSE2-NEXT:    movaps da+4112(%rax), %xmm2
+; SSE2-NEXT:    cmpltps db+4112(%rax), %xmm2
+; SSE2-NEXT:    cmpltps db+4096(%rax), %xmm1
+; SSE2-NEXT:    packssdw %xmm2, %xmm1
+; SSE2-NEXT:    movaps dc+4096(%rax), %xmm2
 ; SSE2-NEXT:    movaps dc+4112(%rax), %xmm3
-; SSE2-NEXT:    movaps dc+4096(%rax), %xmm4
-; SSE2-NEXT:    cmpltps dd+4096(%rax), %xmm4
-; SSE2-NEXT:    andps %xmm2, %xmm4
 ; SSE2-NEXT:    cmpltps dd+4112(%rax), %xmm3
-; SSE2-NEXT:    andps %xmm1, %xmm3
-; SSE2-NEXT:    andps %xmm0, %xmm4
-; SSE2-NEXT:    andps %xmm0, %xmm3
-; SSE2-NEXT:    movaps %xmm3, dj+4112(%rax)
-; SSE2-NEXT:    movaps %xmm4, dj+4096(%rax)
+; SSE2-NEXT:    cmpltps dd+4096(%rax), %xmm2
+; SSE2-NEXT:    packssdw %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    pand %xmm0, %xmm1
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pand %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, dj+4112(%rax)
+; SSE2-NEXT:    movdqa %xmm1, dj+4096(%rax)
 ; SSE2-NEXT:    addq $32, %rax
 ; SSE2-NEXT:    jne .LBB5_1
 ; SSE2-NEXT:  # %bb.2: # %for.end
@@ -304,24 +308,27 @@ define dso_local void @example25() nounwind {
 ; SSE41-LABEL: example25:
 ; SSE41:       # %bb.0: # %vector.ph
 ; SSE41-NEXT:    movq $-4096, %rax # imm = 0xF000
-; SSE41-NEXT:    movaps {{.*#+}} xmm0 = [1,1,1,1]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
 ; SSE41-NEXT:    .p2align 4, 0x90
 ; SSE41-NEXT:  .LBB5_1: # %vector.body
 ; SSE41-NEXT:    # =>This Inner Loop Header: Depth=1
-; SSE41-NEXT:    movaps da+4112(%rax), %xmm1
-; SSE41-NEXT:    movaps da+4096(%rax), %xmm2
-; SSE41-NEXT:    cmpltps db+4096(%rax), %xmm2
-; SSE41-NEXT:    cmpltps db+4112(%rax), %xmm1
+; SSE41-NEXT:    movaps da+4096(%rax), %xmm1
+; SSE41-NEXT:    movaps da+4112(%rax), %xmm2
+; SSE41-NEXT:    cmpltps db+4112(%rax), %xmm2
+; SSE41-NEXT:    cmpltps db+4096(%rax), %xmm1
+; SSE41-NEXT:    packssdw %xmm2, %xmm1
+; SSE41-NEXT:    movaps dc+4096(%rax), %xmm2
 ; SSE41-NEXT:    movaps dc+4112(%rax), %xmm3
-; SSE41-NEXT:    movaps dc+4096(%rax), %xmm4
-; SSE41-NEXT:    cmpltps dd+4096(%rax), %xmm4
-; SSE41-NEXT:    andps %xmm2, %xmm4
 ; SSE41-NEXT:    cmpltps dd+4112(%rax), %xmm3
-; SSE41-NEXT:    andps %xmm1, %xmm3
-; SSE41-NEXT:    andps %xmm0, %xmm4
-; SSE41-NEXT:    andps %xmm0, %xmm3
-; SSE41-NEXT:    movaps %xmm3, dj+4112(%rax)
-; SSE41-NEXT:    movaps %xmm4, dj+4096(%rax)
+; SSE41-NEXT:    cmpltps dd+4096(%rax), %xmm2
+; SSE41-NEXT:    packssdw %xmm3, %xmm2
+; SSE41-NEXT:    pand %xmm1, %xmm2
+; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
+; SSE41-NEXT:    pand %xmm0, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, dj+4112(%rax)
+; SSE41-NEXT:    movdqa %xmm1, dj+4096(%rax)
 ; SSE41-NEXT:    addq $32, %rax
 ; SSE41-NEXT:    jne .LBB5_1
 ; SSE41-NEXT:  # %bb.2: # %for.end

diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
index b6eebe95cdd331..dffff765506686 100644
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -424,10 +424,10 @@ define i1 @allzeros_v16i16_sign(<16 x i16> %arg) {
 define i1 @allones_v32i16_sign(<32 x i16> %arg) {
 ; SSE-LABEL: allones_v32i16_sign:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pand %xmm3, %xmm1
-; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    packsswb %xmm3, %xmm2
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    pmovmskb %xmm2, %eax
 ; SSE-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT:    sete %al
 ; SSE-NEXT:    retq
@@ -435,10 +435,10 @@ define i1 @allones_v32i16_sign(<32 x i16> %arg) {
 ; AVX1-LABEL: allones_v32i16_sign:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT:    sete %al
@@ -488,9 +488,9 @@ define i1 @allones_v32i16_sign(<32 x i16> %arg) {
 define i1 @allzeros_v32i16_sign(<32 x i16> %arg) {
 ; SSE-LABEL: allzeros_v32i16_sign:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    por %xmm2, %xmm0
+; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
+; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    pmovmskb %xmm0, %eax
 ; SSE-NEXT:    testl %eax, %eax
 ; SSE-NEXT:    sete %al
@@ -499,10 +499,10 @@ define i1 @allzeros_v32i16_sign(<32 x i16> %arg) {
 ; AVX1-LABEL: allzeros_v32i16_sign:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    testl %eax, %eax
 ; AVX1-NEXT:    sete %al
@@ -836,10 +836,10 @@ define i1 @allones_v8i64_sign(<8 x i64> %arg) {
 ; AVX1-LABEL: allones_v8i64_sign:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vtestps %xmm1, %xmm0
 ; AVX1-NEXT:    setb %al
@@ -892,10 +892,10 @@ define i1 @allzeros_v8i64_sign(<8 x i64> %arg) {
 ; AVX1-LABEL: allzeros_v8i64_sign:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vtestps %xmm0, %xmm0
 ; AVX1-NEXT:    sete %al
 ; AVX1-NEXT:    vzeroupper
@@ -1503,12 +1503,14 @@ define i1 @allones_v16i16_and1(<16 x i16> %arg) {
 define i1 @allones_v32i16_and1(<32 x i16> %arg) {
 ; SSE-LABEL: allones_v32i16_and1:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    psllw $15, %xmm1
-; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    psllw $15, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    psllw $15, %xmm3
+; SSE-NEXT:    psllw $15, %xmm2
+; SSE-NEXT:    packsswb %xmm3, %xmm2
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    pmovmskb %xmm2, %eax
 ; SSE-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT:    sete %al
 ; SSE-NEXT:    retq
@@ -1516,12 +1518,14 @@ define i1 @allones_v32i16_and1(<32 x i16> %arg) {
 ; AVX1-LABEL: allones_v32i16_and1:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpsllw $15, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpsllw $15, %xmm0, %xmm0
 ; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsllw $15, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $15, %xmm1, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT:    sete %al
@@ -2166,15 +2170,15 @@ define i1 @allones_v8i64_and1(<8 x i64> %arg) {
 ;
 ; AVX1-LABEL: allones_v8i64_and1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsllq $63, %xmm1, %xmm2
-; AVX1-NEXT:    vpsllq $63, %xmm0, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsllq $63, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllq $63, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsllq $63, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllq $63, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vtestps %xmm1, %xmm0
 ; AVX1-NEXT:    setb %al
@@ -2704,12 +2708,14 @@ define i1 @allones_v16i16_and4(<16 x i16> %arg) {
 define i1 @allones_v32i16_and4(<32 x i16> %arg) {
 ; SSE-LABEL: allones_v32i16_and4:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    psllw $13, %xmm1
-; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    psllw $13, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    psllw $13, %xmm3
+; SSE-NEXT:    psllw $13, %xmm2
+; SSE-NEXT:    packsswb %xmm3, %xmm2
+; SSE-NEXT:    pand %xmm0, %xmm2
+; SSE-NEXT:    pmovmskb %xmm2, %eax
 ; SSE-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; SSE-NEXT:    sete %al
 ; SSE-NEXT:    retq
@@ -2717,12 +2723,14 @@ define i1 @allones_v32i16_and4(<32 x i16> %arg) {
 ; AVX1-LABEL: allones_v32i16_and4:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT:    vpsllw $13, %xmm2, %xmm2
-; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpsllw $13, %xmm0, %xmm0
 ; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsllw $13, %xmm2, %xmm2
+; AVX1-NEXT:    vpsllw $13, %xmm1, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
 ; AVX1-NEXT:    sete %al
@@ -3367,15 +3375,15 @@ define i1 @allones_v8i64_and4(<8 x i64> %arg) {
 ;
 ; AVX1-LABEL: allones_v8i64_and4:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsllq $61, %xmm1, %xmm2
-; AVX1-NEXT:    vpsllq $61, %xmm0, %xmm3
-; AVX1-NEXT:    vpand %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsllq $61, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllq $61, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsllq $61, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsllq $61, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm0, %xmm2, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vtestps %xmm1, %xmm0
 ; AVX1-NEXT:    setb %al

diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
index 6752c4f426becc..bb633a39437b31 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
@@ -1108,16 +1108,16 @@ define i1 @icmp0_v8i64_v8i1(<8 x i64>) {
 ;
 ; AVX1-LABEL: icmp0_v8i64_v8i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm4
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vtestps %xmm0, %xmm0
 ; AVX1-NEXT:    setne %al
 ; AVX1-NEXT:    vzeroupper
@@ -1227,13 +1227,13 @@ define i1 @icmp0_v32i16_v32i1(<32 x i16>) {
 ; SSE-LABEL: icmp0_v32i16_v32i1:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm4, %xmm4
-; SSE-NEXT:    pcmpeqw %xmm4, %xmm2
-; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
-; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    pcmpeqw %xmm4, %xmm3
+; SSE-NEXT:    pcmpeqw %xmm4, %xmm2
+; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    pcmpeqw %xmm4, %xmm1
-; SSE-NEXT:    por %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
+; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    pmovmskb %xmm0, %eax
 ; SSE-NEXT:    testl %eax, %eax
 ; SSE-NEXT:    setne %al
@@ -1241,16 +1241,16 @@ define i1 @icmp0_v32i16_v32i1(<32 x i16>) {
 ;
 ; AVX1-LABEL: icmp0_v32i16_v32i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm4
-; AVX1-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    testl %eax, %eax
 ; AVX1-NEXT:    setne %al
@@ -1905,17 +1905,17 @@ define i1 @icmp_v8i64_v8i1(<8 x i64>, <8 x i64>) {
 ;
 ; AVX1-LABEL: icmp_v8i64_v8i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpcmpeqq %xmm4, %xmm5, %xmm4
 ; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpeqq %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpcmpeqq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpackssdw %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vtestps %xmm0, %xmm0
 ; AVX1-NEXT:    setne %al
 ; AVX1-NEXT:    vzeroupper
@@ -2022,13 +2022,13 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) {
 define i1 @icmp_v32i16_v32i1(<32 x i16>, <32 x i16>) {
 ; SSE-LABEL: icmp_v32i16_v32i1:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqw %xmm6, %xmm2
-; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
-; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    pcmpeqw %xmm7, %xmm3
+; SSE-NEXT:    pcmpeqw %xmm6, %xmm2
+; SSE-NEXT:    packsswb %xmm3, %xmm2
 ; SSE-NEXT:    pcmpeqw %xmm5, %xmm1
-; SSE-NEXT:    por %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
+; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    pmovmskb %xmm0, %eax
 ; SSE-NEXT:    testl %eax, %eax
 ; SSE-NEXT:    setne %al
@@ -2036,17 +2036,17 @@ define i1 @icmp_v32i16_v32i1(<32 x i16>, <32 x i16>) {
 ;
 ; AVX1-LABEL: icmp_v32i16_v32i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm5, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    testl %eax, %eax
 ; AVX1-NEXT:    setne %al

diff --git a/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
index bfb9ca26378f24..5f8c06625a93c2 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
@@ -1635,30 +1635,30 @@ define i1 @icmp0_v32i16_v32i1(<32 x i16>) {
 ; SSE-LABEL: icmp0_v32i16_v32i1:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm4, %xmm4
-; SSE-NEXT:    pcmpeqw %xmm4, %xmm2
-; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
-; SSE-NEXT:    pxor %xmm2, %xmm0
-; SSE-NEXT:    pcmpeqw %xmm4, %xmm3
 ; SSE-NEXT:    pcmpeqw %xmm4, %xmm1
-; SSE-NEXT:    pxor %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    pcmpeqw %xmm4, %xmm3
+; SSE-NEXT:    pcmpeqw %xmm4, %xmm2
+; SSE-NEXT:    packsswb %xmm3, %xmm2
+; SSE-NEXT:    pxor %xmm0, %xmm2
+; SSE-NEXT:    pmovmskb %xmm2, %eax
 ; SSE-NEXT:    xorb %ah, %al
 ; SSE-NEXT:    setnp %al
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: icmp0_v32i16_v32i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm3
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm4
-; AVX1-NEXT:    vpxor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    xorb %ah, %al
 ; AVX1-NEXT:    setnp %al
@@ -2563,31 +2563,31 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) {
 define i1 @icmp_v32i16_v32i1(<32 x i16>, <32 x i16>) {
 ; SSE-LABEL: icmp_v32i16_v32i1:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pcmpeqw %xmm6, %xmm2
-; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
-; SSE-NEXT:    pxor %xmm2, %xmm0
-; SSE-NEXT:    pcmpeqw %xmm7, %xmm3
 ; SSE-NEXT:    pcmpeqw %xmm5, %xmm1
-; SSE-NEXT:    pxor %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm4, %xmm0
 ; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %eax
+; SSE-NEXT:    pcmpeqw %xmm7, %xmm3
+; SSE-NEXT:    pcmpeqw %xmm6, %xmm2
+; SSE-NEXT:    packsswb %xmm3, %xmm2
+; SSE-NEXT:    pxor %xmm0, %xmm2
+; SSE-NEXT:    pmovmskb %xmm2, %eax
 ; SSE-NEXT:    xorb %ah, %al
 ; SSE-NEXT:    setnp %al
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: icmp_v32i16_v32i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm4
-; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm5, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT:    vpcmpeqw %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpacksswb %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
 ; AVX1-NEXT:    xorb %ah, %al
 ; AVX1-NEXT:    setnp %al

