[llvm] r277132 - [X86][SSE] Optimize the truncation of vector comparison results with PACKSS
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 29 03:23:11 PDT 2016
Author: rksimon
Date: Fri Jul 29 05:23:10 2016
New Revision: 277132
URL: http://llvm.org/viewvc/llvm-project?rev=277132&view=rev
Log:
[X86][SSE] Optimize the truncation of vector comparison results with PACKSS
We currently default to using either generic shuffles or MASK+PACKUS/PACKSS to truncate all integer vectors. For vector comparisons, we know that every element of the result is either all-ones or all-zeros, so the result can be truncated efficiently by using PACKSS directly to repeatedly halve the size of each element.
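The core trick, as an illustrative sketch (hypothetical helper name, not the committed lowering code): because every lane of a comparison mask is 0 or -1, signed saturation leaves the value unchanged when a lane is narrowed, so one PACKSSWB halves the element width of the whole mask with no masking or element-size bookkeeping.

#include <emmintrin.h> /* SSE2 */

/* Truncate an 8 x i32 compare mask (two registers, each lane 0 or -1)
 * to an 8 x i16 mask. Each 0/-1 i32 lane reads as two 0/-1 i16 lanes,
 * and PACKSSWB saturates i16 -> i8, keeping 0 as 0 and -1 as -1, so the
 * packed bytes reassemble into the desired 0/-1 i16 lanes. */
static __m128i trunc_cmp_mask_v8i32_to_v8i16(__m128i lo_mask, __m128i hi_mask) {
  return _mm_packs_epi16(lo_mask, hi_mask); /* packsswb */
}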
Due to the limited input values (-1 or 0) we don't need to account for the vector element size, so for simplicity we just use the PACKSS(vXi16,vXi16) implementation in all cases. Additionally, for AVX2 PACKSS of 256-bit data we must perform a PERMQ shuffle to restore the correct element order. I did investigate performing a single shuffle after all the PACKSS calls, but the need to cross 128-bit lanes makes this difficult to achieve efficiently.
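A correspondingly hedged sketch of the AVX2 fix-up (again a hypothetical helper, not the committed code): VPACKSSWB operates independently within each 128-bit lane, so the packed halves come out interleaved and a VPERMQ with indices 0,2,1,3 puts them back in source order.

#include <immintrin.h> /* AVX2 */

/* Pack two 256-bit 0/-1 masks and restore element order. The in-lane
 * pack produces the 64-bit quarters (A.lane0, B.lane0 | A.lane1, B.lane1);
 * permuting the quarters as 0,2,1,3 yields (A.lane0, A.lane1 | B.lane0,
 * B.lane1), i.e. all of A's packed elements followed by all of B's. */
static __m256i pack_cmp_masks_256(__m256i a_mask, __m256i b_mask) {
  __m256i packed = _mm256_packs_epi16(a_mask, b_mask);              /* vpacksswb */
  return _mm256_permute4x64_epi64(packed, _MM_SHUFFLE(3, 1, 2, 0)); /* 0,2,1,3 */
}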
We avoid performing this on AVX512 as it should have better alternative truncation instructions.
Differential Revision: https://reviews.llvm.org/D22814
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/setcc-lowering.ll
llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=277132&r1=277131&r2=277132&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Jul 29 05:23:10 2016
@@ -4423,8 +4423,6 @@ static SDValue getZeroVector(MVT VT, con
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
const SDLoc &dl, unsigned vectorWidth) {
- assert((vectorWidth == 128 || vectorWidth == 256) &&
- "Unsupported vector width");
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
unsigned Factor = VT.getSizeInBits()/vectorWidth;
@@ -14132,6 +14130,85 @@ static SDValue LowerZERO_EXTEND(SDValue
return SDValue();
}
+/// Helper to recursively truncate vector elements in half with PACKSS.
+/// It makes use of the fact that vector comparison results will be all-zeros
+/// or all-ones to use (vXi8 PACKSS(vYi16, vYi16)) instead of matching types.
+/// AVX2 (Int256) sub-targets require extra shuffling as the PACKSS operates
+/// within each 128-bit lane.
+static SDValue truncateVectorCompareWithPACKSS(EVT DstVT, SDValue In,
+ const SDLoc &DL,
+ SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ // AVX512 has fast truncate.
+ if (Subtarget.hasAVX512())
+ return SDValue();
+
+ EVT SrcVT = In.getValueType();
+
+ // No truncation required, we might get here due to recursive calls.
+ if (SrcVT == DstVT)
+ return In;
+
+ // We only support vector truncation to 128bits or greater from a
+ // 256bits or greater source.
+ if ((DstVT.getSizeInBits() % 128) != 0)
+ return SDValue();
+ if ((SrcVT.getSizeInBits() % 256) != 0)
+ return SDValue();
+
+ unsigned NumElems = SrcVT.getVectorNumElements();
+ assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
+ assert(SrcVT.getSizeInBits() > DstVT.getSizeInBits() && "Illegal truncation");
+
+ EVT PackedSVT =
+ EVT::getIntegerVT(*DAG.getContext(), SrcVT.getScalarSizeInBits() / 2);
+
+ // Extract lower/upper subvectors.
+ unsigned NumSubElts = NumElems / 2;
+ unsigned SrcSizeInBits = SrcVT.getSizeInBits();
+ SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
+ SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
+
+ // 256bit -> 128bit truncate - PACKSS lower/upper 128-bit subvectors.
+ if (SrcVT.is256BitVector()) {
+ Lo = DAG.getBitcast(MVT::v8i16, Lo);
+ Hi = DAG.getBitcast(MVT::v8i16, Hi);
+ SDValue Res = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, Lo, Hi);
+ return DAG.getBitcast(DstVT, Res);
+ }
+
+ // AVX2: 512bit -> 256bit truncate - PACKSS lower/upper 256-bit subvectors.
+ // AVX2: 512bit -> 128bit truncate - PACKSS(PACKSS, PACKSS).
+ if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
+ Lo = DAG.getBitcast(MVT::v16i16, Lo);
+ Hi = DAG.getBitcast(MVT::v16i16, Hi);
+ SDValue Res = DAG.getNode(X86ISD::PACKSS, DL, MVT::v32i8, Lo, Hi);
+
+ // 256-bit PACKSS(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
+ // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
+ Res = DAG.getBitcast(MVT::v4i64, Res);
+ Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, {0, 2, 1, 3});
+
+ if (DstVT.is256BitVector())
+ return DAG.getBitcast(DstVT, Res);
+
+ // If 512bit -> 128bit truncate another stage.
+ EVT PackedVT = EVT::getVectorVT(*DAG.getContext(), PackedSVT, NumElems);
+ Res = DAG.getBitcast(PackedVT, Res);
+ return truncateVectorCompareWithPACKSS(DstVT, Res, DL, DAG, Subtarget);
+ }
+
+ // Recursively pack lower/upper subvectors, concat result and pack again.
+ assert(SrcVT.getSizeInBits() >= 512 && "Expected 512-bit vector or greater");
+ EVT PackedVT = EVT::getVectorVT(*DAG.getContext(), PackedSVT, NumElems / 2);
+ Lo = truncateVectorCompareWithPACKSS(PackedVT, Lo, DL, DAG, Subtarget);
+ Hi = truncateVectorCompareWithPACKSS(PackedVT, Hi, DL, DAG, Subtarget);
+
+ PackedVT = EVT::getVectorVT(*DAG.getContext(), PackedSVT, NumElems);
+ SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
+ return truncateVectorCompareWithPACKSS(DstVT, Res, DL, DAG, Subtarget);
+}
+
static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
@@ -14198,6 +14275,23 @@ SDValue X86TargetLowering::LowerTRUNCATE
DAG.getNode(X86ISD::VSEXT, DL, MVT::v16i32, In));
return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
}
+
+ // Truncate with PACKSS if we are truncating a vector comparison result.
+ // TODO: We should be able to support other operations as long as
+ // we are saturating+packing zero/all bits only.
+ auto IsPackableComparison = [](SDValue V) {
+ unsigned Opcode = V.getOpcode();
+ return (Opcode == X86ISD::PCMPGT || Opcode == X86ISD::PCMPEQ ||
+ Opcode == X86ISD::CMPP);
+ };
+
+ if (IsPackableComparison(In) ||
+ (In.getOpcode() == ISD::CONCAT_VECTORS &&
+ std::all_of(In->op_begin(), In->op_end(), IsPackableComparison))) {
+ if (SDValue V = truncateVectorCompareWithPACKSS(VT, In, DL, DAG, Subtarget))
+ return V;
+ }
+
if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
// On AVX2, v4i64 -> v4i32 becomes VPERMD.
if (Subtarget.hasInt256()) {
@@ -29652,6 +29746,45 @@ static SDValue combineVectorTruncation(S
return SDValue();
}
+/// This function transforms vector truncation of comparison results from
+/// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS operations.
+static SDValue combineVectorCompareTruncation(SDNode *N, SDLoc &DL,
+ SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ // AVX512 has fast truncate.
+ if (Subtarget.hasAVX512())
+ return SDValue();
+
+ if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
+ return SDValue();
+
+ // TODO: we should be able to support sources other than compares as long
+ // as we are saturating+packing zero/all bits only.
+ SDValue In = N->getOperand(0);
+ if (In.getOpcode() != ISD::SETCC || !In.getValueType().isSimple())
+ return SDValue();
+
+ MVT VT = N->getValueType(0).getSimpleVT();
+ MVT SVT = VT.getScalarType();
+
+ MVT InVT = In.getValueType().getSimpleVT();
+ MVT InSVT = InVT.getScalarType();
+
+ assert(DAG.getTargetLoweringInfo().getBooleanContents(InVT) ==
+ llvm::TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
+ "Expected comparison result to be zero/all bits");
+
+ // Check we have a truncation suited for PACKSS.
+ if (!VT.is128BitVector() && !VT.is256BitVector())
+ return SDValue();
+ if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
+ return SDValue();
+ if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
+ return SDValue();
+
+ return truncateVectorCompareWithPACKSS(VT, In, DL, DAG, Subtarget);
+}
+
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
@@ -29670,6 +29803,10 @@ static SDValue combineTruncate(SDNode *N
return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
}
+ // Try to truncate vector comparison results with PACKSS.
+ if (SDValue V = combineVectorCompareTruncation(N, DL, DAG, Subtarget))
+ return V;
+
return combineVectorTruncation(N, DAG, Subtarget);
}
Modified: llvm/trunk/test/CodeGen/X86/setcc-lowering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/setcc-lowering.ll?rev=277132&r1=277131&r2=277132&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/setcc-lowering.ll (original)
+++ llvm/trunk/test/CodeGen/X86/setcc-lowering.ll Fri Jul 29 05:23:10 2016
@@ -13,11 +13,8 @@ define <8 x i16> @pr25080(<8 x i32> %a)
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
Modified: llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-compare-results.ll?rev=277132&r1=277131&r2=277132&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-results.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-compare-results.ll Fri Jul 29 05:23:10 2016
@@ -115,40 +115,27 @@ define <16 x i1> @test_cmp_v16i8(<16 x i
;
define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
-; SSE2-LABEL: test_cmp_v4f64:
-; SSE2: # BB#0:
-; SSE2-NEXT: cmpltpd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; SSE2-NEXT: cmpltpd %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSE42-LABEL: test_cmp_v4f64:
-; SSE42: # BB#0:
-; SSE42-NEXT: cmpltpd %xmm1, %xmm3
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,2]
-; SSE42-NEXT: cmpltpd %xmm0, %xmm2
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE42-NEXT: retq
+; SSE-LABEL: test_cmp_v4f64:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm1, %xmm3
+; SSE-NEXT: cmpltpd %xmm0, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4f64:
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4f64:
; AVX2: # BB#0:
; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -163,46 +150,27 @@ define <4 x i1> @test_cmp_v4f64(<4 x dou
}
define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
-; SSE2-LABEL: test_cmp_v8f32:
-; SSE2: # BB#0:
-; SSE2-NEXT: cmpltps %xmm1, %xmm3
-; SSE2-NEXT: pslld $16, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: cmpltps %xmm0, %xmm2
-; SSE2-NEXT: pslld $16, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE42-LABEL: test_cmp_v8f32:
-; SSE42: # BB#0:
-; SSE42-NEXT: cmpltps %xmm1, %xmm3
-; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE42-NEXT: pshufb %xmm1, %xmm3
-; SSE42-NEXT: cmpltps %xmm0, %xmm2
-; SSE42-NEXT: pshufb %xmm1, %xmm2
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE42-NEXT: movdqa %xmm2, %xmm0
-; SSE42-NEXT: retq
+; SSE-LABEL: test_cmp_v8f32:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm1, %xmm3
+; SSE-NEXT: cmpltps %xmm0, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f32:
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f32:
; AVX2: # BB#0:
; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -230,28 +198,24 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: packsswb %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v4i64:
; SSE42: # BB#0:
; SSE42-NEXT: pcmpgtq %xmm3, %xmm1
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE42-NEXT: packsswb %xmm1, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v4i64:
@@ -259,19 +223,16 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v4i64:
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -286,46 +247,28 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64
}
define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
-; SSE2-LABEL: test_cmp_v8i32:
-; SSE2: # BB#0:
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
-; SSE2-NEXT: pslld $16, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE2-NEXT: pslld $16, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE42-LABEL: test_cmp_v8i32:
-; SSE42: # BB#0:
-; SSE42-NEXT: pcmpgtd %xmm3, %xmm1
-; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE42-NEXT: pshufb %xmm3, %xmm1
-; SSE42-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE42-NEXT: pshufb %xmm3, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE42-NEXT: retq
+; SSE-LABEL: test_cmp_v8i32:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -340,36 +283,20 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32
}
define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
-; SSE2-LABEL: test_cmp_v16i16:
-; SSE2: # BB#0:
-; SSE2-NEXT: pcmpgtw %xmm3, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: packuswb %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE42-LABEL: test_cmp_v16i16:
-; SSE42: # BB#0:
-; SSE42-NEXT: pcmpgtw %xmm3, %xmm1
-; SSE42-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE42-NEXT: pshufb %xmm3, %xmm1
-; SSE42-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE42-NEXT: pshufb %xmm3, %xmm0
-; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE42-NEXT: retq
+; SSE-LABEL: test_cmp_v16i16:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -377,10 +304,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -630,76 +554,38 @@ define <32 x i1> @test_cmp_v32i8(<32 x i
;
define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
-; SSE2-LABEL: test_cmp_v8f64:
-; SSE2: # BB#0:
-; SSE2-NEXT: cmpltpd %xmm3, %xmm7
-; SSE2-NEXT: cmpltpd %xmm1, %xmm5
-; SSE2-NEXT: pextrw $4, %xmm5, %eax
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
-; SSE2-NEXT: cmpltpd %xmm2, %xmm6
-; SSE2-NEXT: cmpltpd %xmm0, %xmm4
-; SSE2-NEXT: pextrw $4, %xmm4, %ecx
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE2-NEXT: pextrw $4, %xmm7, %edx
-; SSE2-NEXT: movd %edx, %xmm0
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: pextrw $4, %xmm6, %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movd %ecx, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE42-LABEL: test_cmp_v8f64:
-; SSE42: # BB#0:
-; SSE42-NEXT: cmpltpd %xmm3, %xmm7
-; SSE42-NEXT: xorpd %xmm3, %xmm3
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm3[1,2,3],xmm7[4],xmm3[5,6,7]
-; SSE42-NEXT: cmpltpd %xmm2, %xmm6
-; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm3[1,2,3],xmm6[4],xmm3[5,6,7]
-; SSE42-NEXT: packusdw %xmm7, %xmm6
-; SSE42-NEXT: cmpltpd %xmm1, %xmm5
-; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm3[1,2,3],xmm5[4],xmm3[5,6,7]
-; SSE42-NEXT: cmpltpd %xmm0, %xmm4
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3],xmm4[4],xmm3[5,6,7]
-; SSE42-NEXT: packusdw %xmm5, %xmm3
-; SSE42-NEXT: packusdw %xmm6, %xmm3
-; SSE42-NEXT: movdqa %xmm3, %xmm0
-; SSE42-NEXT: retq
+; SSE-LABEL: test_cmp_v8f64:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm3, %xmm7
+; SSE-NEXT: cmpltpd %xmm2, %xmm6
+; SSE-NEXT: packsswb %xmm7, %xmm6
+; SSE-NEXT: cmpltpd %xmm1, %xmm5
+; SSE-NEXT: cmpltpd %xmm0, %xmm4
+; SSE-NEXT: packsswb %xmm5, %xmm4
+; SSE-NEXT: packsswb %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8f64:
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
-; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8f64:
; AVX2: # BB#0:
-; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -718,50 +604,35 @@ define <16 x i1> @test_cmp_v16f32(<16 x
; SSE-LABEL: test_cmp_v16f32:
; SSE: # BB#0:
; SSE-NEXT: cmpltps %xmm3, %xmm7
-; SSE-NEXT: movaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE-NEXT: andps %xmm3, %xmm7
; SSE-NEXT: cmpltps %xmm2, %xmm6
-; SSE-NEXT: andps %xmm3, %xmm6
-; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: packsswb %xmm7, %xmm6
; SSE-NEXT: cmpltps %xmm1, %xmm5
-; SSE-NEXT: andps %xmm3, %xmm5
; SSE-NEXT: cmpltps %xmm0, %xmm4
-; SSE-NEXT: andps %xmm4, %xmm3
-; SSE-NEXT: packuswb %xmm5, %xmm3
-; SSE-NEXT: packuswb %xmm6, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: packsswb %xmm5, %xmm4
+; SSE-NEXT: packsswb %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f32:
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vmovaps {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vandps %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vandps %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vandps %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f32:
; AVX2: # BB#0:
; AVX2-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -786,71 +657,54 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; SSE2-NEXT: pcmpgtd %xmm7, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm7, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
-; SSE2-NEXT: pand %xmm10, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,1,3,3]
-; SSE2-NEXT: por %xmm7, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm5
-; SSE2-NEXT: pxor %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,0,2,2]
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; SSE2-NEXT: pand %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3]
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: pextrw $4, %xmm1, %eax
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm9[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm6
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pcmpgtd %xmm6, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm3[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm6, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: packsswb %xmm7, %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm5
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE2-NEXT: pand %xmm5, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: pextrw $4, %xmm9, %ecx
-; SSE2-NEXT: movd %ecx, %xmm2
-; SSE2-NEXT: pextrw $4, %xmm0, %ecx
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: pextrw $4, %xmm3, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: movd %ecx, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: packsswb %xmm3, %xmm0
+; SSE2-NEXT: packsswb %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v8i64:
; SSE42: # BB#0:
; SSE42-NEXT: pcmpgtq %xmm7, %xmm3
-; SSE42-NEXT: pxor %xmm7, %xmm7
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm7[1,2,3],xmm3[4],xmm7[5,6,7]
; SSE42-NEXT: pcmpgtq %xmm6, %xmm2
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1,2,3],xmm2[4],xmm7[5,6,7]
-; SSE42-NEXT: packusdw %xmm3, %xmm2
+; SSE42-NEXT: packsswb %xmm3, %xmm2
; SSE42-NEXT: pcmpgtq %xmm5, %xmm1
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
; SSE42-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
-; SSE42-NEXT: packusdw %xmm1, %xmm0
-; SSE42-NEXT: packusdw %xmm2, %xmm0
+; SSE42-NEXT: packsswb %xmm1, %xmm0
+; SSE42-NEXT: packsswb %xmm2, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v8i64:
@@ -858,34 +712,25 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4],xmm5[5,6,7]
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1,2,3],xmm1[4],xmm5[5,6,7]
-; AVX1-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2,3],xmm3[4],xmm5[5,6,7]
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
-; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v8i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -904,17 +749,12 @@ define <16 x i1> @test_cmp_v16i32(<16 x
; SSE-LABEL: test_cmp_v16i32:
; SSE: # BB#0:
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE-NEXT: pand %xmm7, %xmm3
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
-; SSE-NEXT: pand %xmm7, %xmm2
-; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
; SSE-NEXT: pcmpgtd %xmm5, %xmm1
-; SSE-NEXT: pand %xmm7, %xmm1
; SSE-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm0
-; SSE-NEXT: packuswb %xmm1, %xmm0
-; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i32:
@@ -922,35 +762,25 @@ define <16 x i1> @test_cmp_v16i32(<16 x
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1197,35 +1027,22 @@ define <32 x i1> @test_cmp_v32i16(<32 x
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpshufb %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v32i16:
@@ -2099,85 +1916,56 @@ define <16 x i1> @test_cmp_v16f64(<16 x
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm14
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm15
; SSE-NEXT: cmpltpd %xmm7, %xmm15
-; SSE-NEXT: movapd {{.*#+}} xmm7 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; SSE-NEXT: andpd %xmm7, %xmm15
; SSE-NEXT: cmpltpd %xmm6, %xmm14
-; SSE-NEXT: andpd %xmm7, %xmm14
-; SSE-NEXT: packuswb %xmm15, %xmm14
+; SSE-NEXT: packsswb %xmm15, %xmm14
; SSE-NEXT: cmpltpd %xmm5, %xmm13
-; SSE-NEXT: andpd %xmm7, %xmm13
; SSE-NEXT: cmpltpd %xmm4, %xmm9
-; SSE-NEXT: andpd %xmm7, %xmm9
-; SSE-NEXT: packuswb %xmm13, %xmm9
-; SSE-NEXT: packuswb %xmm14, %xmm9
+; SSE-NEXT: packsswb %xmm13, %xmm9
+; SSE-NEXT: packsswb %xmm14, %xmm9
; SSE-NEXT: cmpltpd %xmm3, %xmm12
-; SSE-NEXT: andpd %xmm7, %xmm12
; SSE-NEXT: cmpltpd %xmm2, %xmm10
-; SSE-NEXT: andpd %xmm7, %xmm10
-; SSE-NEXT: packuswb %xmm12, %xmm10
+; SSE-NEXT: packsswb %xmm12, %xmm10
; SSE-NEXT: cmpltpd %xmm1, %xmm11
-; SSE-NEXT: andpd %xmm7, %xmm11
; SSE-NEXT: cmpltpd %xmm8, %xmm0
-; SSE-NEXT: andpd %xmm7, %xmm0
-; SSE-NEXT: packuswb %xmm11, %xmm0
-; SSE-NEXT: packuswb %xmm10, %xmm0
-; SSE-NEXT: packuswb %xmm9, %xmm0
+; SSE-NEXT: packsswb %xmm11, %xmm0
+; SSE-NEXT: packsswb %xmm10, %xmm0
+; SSE-NEXT: packsswb %xmm9, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16f64:
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
-; AVX1-NEXT: vmovapd {{.*#+}} xmm7 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vandpd %xmm7, %xmm8, %xmm8
-; AVX1-NEXT: vandpd %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT: vpacksswb %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vcmpltpd %ymm2, %ymm6, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT: vandpd %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vandpd %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcmpltpd %ymm1, %ymm5, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm0, %ymm4, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16f64:
; AVX2: # BB#0:
-; AVX2-NEXT: vcmpltpd %ymm2, %ymm6, %ymm2
-; AVX2-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vcmpltpd %ymm3, %ymm7, %ymm3
-; AVX2-NEXT: vpermilps {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm2
-; AVX2-NEXT: vcmpltpd %ymm0, %ymm4, %ymm0
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vcmpltpd %ymm2, %ymm6, %ymm2
+; AVX2-NEXT: vpacksswb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vcmpltpd %ymm1, %ymm5, %ymm1
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vcmpltpd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2610,54 +2398,34 @@ define <32 x i1> @test_cmp_v32f32(<32 x
; AVX1-LABEL: test_cmp_v32f32:
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
-; AVX1-NEXT: vmovaps {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vandps %xmm7, %xmm8, %xmm8
-; AVX1-NEXT: vandps %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
+; AVX1-NEXT: vpacksswb %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vcmpltps %ymm2, %ymm6, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT: vandps %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vandps %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcmpltps %ymm1, %ymm5, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandps %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltps %ymm0, %ymm4, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandps %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vandps %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32f32:
; AVX2: # BB#0:
; AVX2-NEXT: vcmpltps %ymm3, %ymm7, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm8, %ymm3, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm7, %xmm3, %xmm3
; AVX2-NEXT: vcmpltps %ymm2, %ymm6, %ymm2
-; AVX2-NEXT: vpshufb %ymm8, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX2-NEXT: vpacksswb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vcmpltps %ymm1, %ymm5, %ymm1
-; AVX2-NEXT: vpshufb %ymm8, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm1
; AVX2-NEXT: vcmpltps %ymm0, %ymm4, %ymm0
-; AVX2-NEXT: vpshufb %ymm8, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v32f32:
@@ -2880,21 +2648,18 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: pand %xmm11, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm10[1,1,3,3]
; SSE2-NEXT: por %xmm7, %xmm9
-; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; SSE2-NEXT: pand %xmm10, %xmm9
; SSE2-NEXT: pxor %xmm8, %xmm6
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm7
-; SSE2-NEXT: movdqa %xmm6, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm11
-; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSE2-NEXT: movdqa %xmm6, %xmm10
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm10
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm10[0,0,2,2]
; SSE2-NEXT: pcmpeqd %xmm6, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
-; SSE2-NEXT: pand %xmm12, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm11[1,1,3,3]
-; SSE2-NEXT: por %xmm7, %xmm11
-; SSE2-NEXT: pand %xmm10, %xmm11
-; SSE2-NEXT: packuswb %xmm9, %xmm11
+; SSE2-NEXT: pand %xmm11, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm10
+; SSE2-NEXT: packsswb %xmm9, %xmm10
; SSE2-NEXT: pxor %xmm8, %xmm5
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: pxor %xmm8, %xmm7
@@ -2906,7 +2671,6 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm6
-; SSE2-NEXT: pand %xmm10, %xmm6
; SSE2-NEXT: pxor %xmm8, %xmm4
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
@@ -2918,9 +2682,8 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3]
; SSE2-NEXT: por %xmm5, %xmm4
-; SSE2-NEXT: pand %xmm10, %xmm4
-; SSE2-NEXT: packuswb %xmm6, %xmm4
-; SSE2-NEXT: packuswb %xmm11, %xmm4
+; SSE2-NEXT: packsswb %xmm6, %xmm4
+; SSE2-NEXT: packsswb %xmm10, %xmm4
; SSE2-NEXT: pxor %xmm8, %xmm3
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm5
@@ -2932,7 +2695,6 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm5
-; SSE2-NEXT: pand %xmm10, %xmm5
; SSE2-NEXT: pxor %xmm8, %xmm2
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
@@ -2944,8 +2706,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm10, %xmm2
-; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: packsswb %xmm5, %xmm2
; SSE2-NEXT: pxor %xmm8, %xmm1
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm3
@@ -2957,7 +2718,6 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: pand %xmm6, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
; SSE2-NEXT: por %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm10, %xmm3
; SSE2-NEXT: pxor %xmm8, %xmm0
; SSE2-NEXT: pxor {{[0-9]+}}(%rsp), %xmm8
; SSE2-NEXT: movdqa %xmm0, %xmm1
@@ -2968,102 +2728,72 @@ define <16 x i1> @test_cmp_v16i64(<16 x
; SSE2-NEXT: pand %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: pand %xmm10, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: packsswb %xmm3, %xmm0
+; SSE2-NEXT: packsswb %xmm2, %xmm0
+; SSE2-NEXT: packsswb %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: test_cmp_v16i64:
; SSE42: # BB#0:
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm7
-; SSE42-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; SSE42-NEXT: pand %xmm8, %xmm7
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm6
-; SSE42-NEXT: pand %xmm8, %xmm6
-; SSE42-NEXT: packuswb %xmm7, %xmm6
+; SSE42-NEXT: packsswb %xmm7, %xmm6
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pand %xmm8, %xmm5
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm4
-; SSE42-NEXT: pand %xmm8, %xmm4
-; SSE42-NEXT: packuswb %xmm5, %xmm4
-; SSE42-NEXT: packuswb %xmm6, %xmm4
+; SSE42-NEXT: packsswb %xmm5, %xmm4
+; SSE42-NEXT: packsswb %xmm6, %xmm4
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm3
-; SSE42-NEXT: pand %xmm8, %xmm3
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm2
-; SSE42-NEXT: pand %xmm8, %xmm2
-; SSE42-NEXT: packuswb %xmm3, %xmm2
+; SSE42-NEXT: packsswb %xmm3, %xmm2
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm1
-; SSE42-NEXT: pand %xmm8, %xmm1
; SSE42-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm0
-; SSE42-NEXT: pand %xmm8, %xmm0
-; SSE42-NEXT: packuswb %xmm1, %xmm0
-; SSE42-NEXT: packuswb %xmm2, %xmm0
-; SSE42-NEXT: packuswb %xmm4, %xmm0
+; SSE42-NEXT: packsswb %xmm1, %xmm0
+; SSE42-NEXT: packsswb %xmm2, %xmm0
+; SSE42-NEXT: packsswb %xmm4, %xmm0
; SSE42-NEXT: retq
;
; AVX1-LABEL: test_cmp_v16i64:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
-; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm9
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vpand %xmm8, %xmm9, %xmm9
+; AVX1-NEXT: vpcmpgtq %xmm8, %xmm9, %xmm8
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm9, %xmm3, %xmm9
+; AVX1-NEXT: vpacksswb %xmm8, %xmm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm9, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm8, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v16i64:
; AVX2: # BB#0:
-; AVX2-NEXT: vpcmpgtq %ymm6, %ymm2, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vpcmpgtq %ymm7, %ymm3, %ymm3
-; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtq %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpcmpgtq %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpacksswb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpcmpgtq %ymm5, %ymm1, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3497,61 +3227,41 @@ define <32 x i1> @test_cmp_v32i32(<32 x
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm9
-; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm9
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vpand %xmm8, %xmm9, %xmm9
+; AVX1-NEXT: vpcmpgtd %xmm8, %xmm9, %xmm8
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm9, %xmm3, %xmm9
+; AVX1-NEXT: vpacksswb %xmm8, %xmm3, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm9, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm8, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_cmp_v32i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtd %ymm7, %ymm3, %ymm3
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm8, %ymm3, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm7, %xmm3, %xmm3
; AVX2-NEXT: vpcmpgtd %ymm6, %ymm2, %ymm2
-; AVX2-NEXT: vpshufb %ymm8, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX2-NEXT: vpacksswb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpcmpgtd %ymm5, %ymm1, %ymm1
-; AVX2-NEXT: vpshufb %ymm8, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtd %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm8, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_cmp_v32i32:
@@ -4221,32 +3931,23 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
; AVX1-NEXT: vpcmpgtw %xmm8, %xmm9, %xmm8
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm9, %xmm8, %xmm8
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm9, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm0[0],xmm8[0]
+; AVX1-NEXT: vpacksswb %xmm8, %xmm0, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm9, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm9, %xmm1, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpacksswb %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm0, %xmm4, %xmm0
-; AVX1-NEXT: vpshufb %xmm9, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb %xmm9, %xmm2, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; AVX1-NEXT: vpacksswb %xmm0, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm0, %xmm4, %xmm0
-; AVX1-NEXT: vpshufb %xmm9, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpshufb %xmm9, %xmm3, %xmm3
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
+; AVX1-NEXT: vpacksswb %xmm0, %xmm3, %xmm3
; AVX1-NEXT: vpextrb $15, %xmm3, %eax
; AVX1-NEXT: andb $1, %al
; AVX1-NEXT: movb %al, 4(%rdi)
@@ -4446,26 +4147,17 @@ define <64 x i1> @test_cmp_v64i16(<64 x
; AVX2-LABEL: test_cmp_v64i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm8
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm8, %xmm8
-; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vpacksswb %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
+; AVX2-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-NEXT: vpacksswb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm5, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX2-NEXT: vpacksswb %xmm4, %xmm3, %xmm3
; AVX2-NEXT: vpextrb $15, %xmm3, %eax
; AVX2-NEXT: andb $1, %al
; AVX2-NEXT: movb %al, 4(%rdi)
@@ -7066,61 +6758,44 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: vmovapd 240(%rbp), %ymm8
-; AVX1-NEXT: vcmpltpd %ymm7, %ymm8, %ymm8
-; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm9
-; AVX1-NEXT: vmovapd {{.*#+}} xmm10 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vandpd %xmm10, %xmm9, %xmm9
-; AVX1-NEXT: vandpd %xmm10, %xmm8, %xmm7
-; AVX1-NEXT: vpackuswb %xmm9, %xmm7, %xmm9
-; AVX1-NEXT: vmovapd 208(%rbp), %ymm8
-; AVX1-NEXT: vcmpltpd %ymm6, %ymm8, %ymm6
+; AVX1-NEXT: vmovapd 16(%rbp), %ymm8
+; AVX1-NEXT: vmovapd 48(%rbp), %ymm9
+; AVX1-NEXT: vmovapd 80(%rbp), %ymm10
+; AVX1-NEXT: vmovapd 112(%rbp), %ymm11
+; AVX1-NEXT: vmovapd 144(%rbp), %ymm12
+; AVX1-NEXT: vmovapd 176(%rbp), %ymm13
+; AVX1-NEXT: vmovapd 208(%rbp), %ymm14
+; AVX1-NEXT: vmovapd 240(%rbp), %ymm15
+; AVX1-NEXT: vcmpltpd %ymm7, %ymm15, %ymm15
+; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm7
+; AVX1-NEXT: vpacksswb %xmm7, %xmm15, %xmm15
+; AVX1-NEXT: vcmpltpd %ymm6, %ymm14, %ymm6
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT: vandpd %xmm10, %xmm7, %xmm7
-; AVX1-NEXT: vandpd %xmm10, %xmm6, %xmm6
-; AVX1-NEXT: vpackuswb %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpackuswb %xmm9, %xmm6, %xmm6
-; AVX1-NEXT: vmovapd 176(%rbp), %ymm7
-; AVX1-NEXT: vcmpltpd %ymm5, %ymm7, %ymm5
+; AVX1-NEXT: vpacksswb %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpacksswb %xmm15, %xmm6, %xmm6
+; AVX1-NEXT: vcmpltpd %ymm5, %ymm13, %ymm5
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm7
-; AVX1-NEXT: vandpd %xmm10, %xmm7, %xmm7
-; AVX1-NEXT: vandpd %xmm10, %xmm5, %xmm5
-; AVX1-NEXT: vpackuswb %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vmovapd 144(%rbp), %ymm7
-; AVX1-NEXT: vcmpltpd %ymm4, %ymm7, %ymm4
+; AVX1-NEXT: vpacksswb %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vcmpltpd %ymm4, %ymm12, %ymm4
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm7
-; AVX1-NEXT: vandpd %xmm10, %xmm7, %xmm7
-; AVX1-NEXT: vandpd %xmm10, %xmm4, %xmm4
-; AVX1-NEXT: vpackuswb %xmm7, %xmm4, %xmm4
-; AVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpackuswb %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vmovapd 112(%rbp), %ymm5
-; AVX1-NEXT: vcmpltpd %ymm3, %ymm5, %ymm3
+; AVX1-NEXT: vpacksswb %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpacksswb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vcmpltpd %ymm3, %ymm11, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT: vandpd %xmm10, %xmm5, %xmm5
-; AVX1-NEXT: vandpd %xmm10, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vmovapd 80(%rbp), %ymm5
-; AVX1-NEXT: vcmpltpd %ymm2, %ymm5, %ymm2
+; AVX1-NEXT: vpacksswb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vcmpltpd %ymm2, %ymm10, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vandpd %xmm10, %xmm5, %xmm5
-; AVX1-NEXT: vandpd %xmm10, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovapd 48(%rbp), %ymm3
-; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vpacksswb %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vcmpltpd %ymm1, %ymm9, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vandpd %xmm10, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm10, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovapd 16(%rbp), %ymm3
-; AVX1-NEXT: vcmpltpd %ymm0, %ymm3, %ymm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vcmpltpd %ymm0, %ymm8, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vandpd %xmm10, %xmm3, %xmm3
-; AVX1-NEXT: vandpd %xmm10, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
@@ -7132,59 +6807,36 @@ define <32 x i1> @test_cmp_v32f64(<32 x
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: vmovapd 48(%rbp), %ymm8
-; AVX2-NEXT: vmovapd 16(%rbp), %ymm9
-; AVX2-NEXT: vmovapd 112(%rbp), %ymm10
-; AVX2-NEXT: vmovapd 80(%rbp), %ymm11
-; AVX2-NEXT: vmovapd 176(%rbp), %ymm12
-; AVX2-NEXT: vmovapd 144(%rbp), %ymm13
-; AVX2-NEXT: vmovapd 240(%rbp), %ymm14
-; AVX2-NEXT: vmovapd 208(%rbp), %ymm15
-; AVX2-NEXT: vcmpltpd %ymm6, %ymm15, %ymm6
-; AVX2-NEXT: vpermilps {{.*#+}} ymm6 = ymm6[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
-; AVX2-NEXT: vcmpltpd %ymm7, %ymm14, %ymm7
-; AVX2-NEXT: vpermilps {{.*#+}} ymm7 = ymm7[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm7
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm15 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm15, %ymm7, %ymm7
-; AVX2-NEXT: vpermq {{.*#+}} ymm14 = ymm7[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm7, %xmm14, %xmm6
-; AVX2-NEXT: vcmpltpd %ymm4, %ymm13, %ymm4
-; AVX2-NEXT: vpermilps {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
-; AVX2-NEXT: vcmpltpd %ymm5, %ymm12, %ymm5
-; AVX2-NEXT: vpermilps {{.*#+}} ymm5 = ymm5[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4
-; AVX2-NEXT: vpshufb %ymm15, %ymm4, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm4, %xmm4
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; AVX2-NEXT: vcmpltpd %ymm2, %ymm11, %ymm2
-; AVX2-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vcmpltpd %ymm3, %ymm10, %ymm3
-; AVX2-NEXT: vpermilps {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vpshufb %ymm15, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-NEXT: vcmpltpd %ymm0, %ymm9, %ymm0
-; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vcmpltpd %ymm1, %ymm8, %ymm1
-; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm15, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX2-NEXT: vmovapd 16(%rbp), %ymm8
+; AVX2-NEXT: vmovapd 48(%rbp), %ymm9
+; AVX2-NEXT: vmovapd 80(%rbp), %ymm10
+; AVX2-NEXT: vmovapd 112(%rbp), %ymm11
+; AVX2-NEXT: vmovapd 144(%rbp), %ymm12
+; AVX2-NEXT: vmovapd 176(%rbp), %ymm13
+; AVX2-NEXT: vmovapd 208(%rbp), %ymm14
+; AVX2-NEXT: vmovapd 240(%rbp), %ymm15
+; AVX2-NEXT: vcmpltpd %ymm7, %ymm15, %ymm7
+; AVX2-NEXT: vcmpltpd %ymm6, %ymm14, %ymm6
+; AVX2-NEXT: vpacksswb %ymm7, %ymm6, %ymm6
+; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,1,3]
+; AVX2-NEXT: vcmpltpd %ymm5, %ymm13, %ymm5
+; AVX2-NEXT: vcmpltpd %ymm4, %ymm12, %ymm4
+; AVX2-NEXT: vpacksswb %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
+; AVX2-NEXT: vcmpltpd %ymm3, %ymm11, %ymm3
+; AVX2-NEXT: vcmpltpd %ymm2, %ymm10, %ymm2
+; AVX2-NEXT: vpacksswb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
+; AVX2-NEXT: vcmpltpd %ymm1, %ymm9, %ymm1
+; AVX2-NEXT: vcmpltpd %ymm0, %ymm8, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
@@ -7989,77 +7641,60 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $32, %rsp
-; AVX1-NEXT: vmovaps 208(%rbp), %ymm9
-; AVX1-NEXT: vmovaps 240(%rbp), %ymm10
-; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm11
-; AVX1-NEXT: vpcmpgtq %xmm8, %xmm11, %xmm11
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
-; AVX1-NEXT: vpand %xmm8, %xmm11, %xmm11
-; AVX1-NEXT: vpcmpgtq %xmm10, %xmm7, %xmm7
-; AVX1-NEXT: vpand %xmm8, %xmm7, %xmm7
-; AVX1-NEXT: vpackuswb %xmm11, %xmm7, %xmm10
-; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm11
+; AVX1-NEXT: vmovaps 240(%rbp), %ymm8
+; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm10
+; AVX1-NEXT: vpcmpgtq %xmm9, %xmm10, %xmm9
+; AVX1-NEXT: vmovaps 208(%rbp), %ymm10
+; AVX1-NEXT: vpcmpgtq %xmm8, %xmm7, %xmm7
+; AVX1-NEXT: vpacksswb %xmm9, %xmm7, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm11, %xmm7, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm9, %xmm6, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm9, %xmm7, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm10, %xmm6, %xmm6
; AVX1-NEXT: vmovaps 176(%rbp), %ymm9
-; AVX1-NEXT: vpand %xmm8, %xmm7, %xmm7
-; AVX1-NEXT: vpand %xmm8, %xmm6, %xmm6
-; AVX1-NEXT: vpackuswb %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpackuswb %xmm10, %xmm6, %xmm10
+; AVX1-NEXT: vpacksswb %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpacksswb %xmm8, %xmm6, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm7
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vmovaps 144(%rbp), %ymm11
-; AVX1-NEXT: vpand %xmm8, %xmm6, %xmm6
+; AVX1-NEXT: vmovaps 144(%rbp), %ymm10
; AVX1-NEXT: vpcmpgtq %xmm9, %xmm5, %xmm5
-; AVX1-NEXT: vpand %xmm8, %xmm5, %xmm5
-; AVX1-NEXT: vpackuswb %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm11, %xmm6
+; AVX1-NEXT: vpacksswb %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm7, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm11, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm8, %xmm6, %xmm6
-; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4
-; AVX1-NEXT: vpackuswb %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm10, %xmm4, %xmm4
+; AVX1-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vmovaps 112(%rbp), %ymm6
-; AVX1-NEXT: vpackuswb %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpackuswb %xmm10, %xmm4, %xmm4
+; AVX1-NEXT: vpacksswb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpacksswb %xmm8, %xmm4, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
; AVX1-NEXT: vmovaps 80(%rbp), %ymm7
-; AVX1-NEXT: vpand %xmm8, %xmm5, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpacksswb %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm5
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vmovaps 48(%rbp), %ymm6
-; AVX1-NEXT: vpand %xmm8, %xmm5, %xmm5
-; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vmovaps 16(%rbp), %ymm5
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm8, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT: movq %rbp, %rsp
; AVX1-NEXT: popq %rbp
@@ -8071,51 +7706,28 @@ define <32 x i1> @test_cmp_v32i64(<32 x
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $32, %rsp
-; AVX2-NEXT: vpcmpgtq 208(%rbp), %ymm6, %ymm6
-; AVX2-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
; AVX2-NEXT: vpcmpgtq 240(%rbp), %ymm7, %ymm7
-; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm7
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
-; AVX2-NEXT: vpshufb %ymm9, %ymm7, %ymm7
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm7[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm7, %xmm8, %xmm6
-; AVX2-NEXT: vpcmpgtq 144(%rbp), %ymm4, %ymm4
-; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
+; AVX2-NEXT: vpcmpgtq 208(%rbp), %ymm6, %ymm6
+; AVX2-NEXT: vpacksswb %ymm7, %ymm6, %ymm6
+; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,1,3]
; AVX2-NEXT: vpcmpgtq 176(%rbp), %ymm5, %ymm5
-; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4
-; AVX2-NEXT: vpshufb %ymm9, %ymm4, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm4, %xmm4
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; AVX2-NEXT: vpcmpgtq 80(%rbp), %ymm2, %ymm2
-; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT: vpcmpgtq 144(%rbp), %ymm4, %ymm4
+; AVX2-NEXT: vpacksswb %ymm5, %ymm4, %ymm4
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3]
; AVX2-NEXT: vpcmpgtq 112(%rbp), %ymm3, %ymm3
-; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT: vpshufb %ymm9, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtq 16(%rbp), %ymm0, %ymm0
-; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpcmpgtq 80(%rbp), %ymm2, %ymm2
+; AVX2-NEXT: vpacksswb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpcmpgtq 48(%rbp), %ymm1, %ymm1
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb %ymm9, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtq 16(%rbp), %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpacksswb %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: movq %rbp, %rsp
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
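In the hunks above, the updated CHECK lines drop the vpand + vpackuswb (and vpshufb/vpunpcklqdq) sequences in favor of vpacksswb chains. As a minimal, self-contained illustration of the packing pattern those CHECK lines now expect, here is a plain SSE2 intrinsics sketch; it is not part of this patch, and the inputs, variable names, and the demo file name below are purely illustrative. Each compare lane is either 0 or -1, and both values pass through signed saturation unchanged, which is why no masking is needed before the packs.

/* Hedged sketch (not from this commit): SSE2 intrinsics showing how
   signed-saturating packs narrow comparison masks directly. */
#include <stdio.h>
#include <immintrin.h>

int main(void) {
  /* Two pairs of 4 x i32 inputs; the compares produce all-ones (-1)
     or all-zeros (0) per 32-bit element. */
  __m128i a0 = _mm_set_epi32(7, -3, 5, 0);
  __m128i b0 = _mm_set_epi32(1,  2, 9, -1);
  __m128i a1 = _mm_set_epi32(0,  8, -2, 4);
  __m128i b1 = _mm_set_epi32(6, -5,  3, 4);

  __m128i m0 = _mm_cmpgt_epi32(a0, b0);   /* per-lane masks: 0 or -1 */
  __m128i m1 = _mm_cmpgt_epi32(a1, b1);

  /* PACKSSDW: 8 x i32 -> 8 x i16. Signed saturation maps -1 -> -1 and
     0 -> 0, so the mask values survive the narrowing unchanged. */
  __m128i words = _mm_packs_epi32(m0, m1);

  /* PACKSSWB: 16 x i16 -> 16 x i8; only the low 8 bytes matter here,
     since the high half just repeats the same source lanes. */
  __m128i bytes8 = _mm_packs_epi16(words, words);

  signed char out[16];
  _mm_storeu_si128((__m128i *)out, bytes8);
  for (int i = 0; i < 8; i++)
    printf("lane %d: %d\n", i, out[i]);   /* prints 0 or -1 per lane */
  return 0;
}

Built with something like "cc -msse2 packss_demo.c" (hypothetical file name), every printed lane stays 0 or -1 through both pack steps, mirroring the repeated vpacksswb halving in the new CHECK lines.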