[llvm] 51a276c - [X86] Teach combineTruncatedArithmetic to push truncate through subtracts where only one of the inputs is free to truncate.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon May 25 12:19:28 PDT 2020
Author: Craig Topper
Date: 2020-05-25T11:42:42-07:00
New Revision: 51a276c759c90c844bbabf5066195aaf42fb0c6e
URL: https://github.com/llvm/llvm-project/commit/51a276c759c90c844bbabf5066195aaf42fb0c6e
DIFF: https://github.com/llvm/llvm-project/commit/51a276c759c90c844bbabf5066195aaf42fb0c6e.diff
LOG: [X86] Teach combineTruncatedArithmetic to push truncate through subtracts where only one of the inputs is free to truncate.
Fix combineSubToSubus to handle the new DAG to avoid a regression.
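As a concrete illustration (a hypothetical IR sketch, not taken from the patch), the combine can now fire on a pattern like the following, where trunc(zext(%x)) folds away for free and only %y needs a real truncate:

define <8 x i16> @trunc_sub(<8 x i16> %x, <8 x i32> %y) {
  %zx  = zext <8 x i16> %x to <8 x i32>
  %sub = sub <8 x i32> %zx, %y
  %res = trunc <8 x i32> %sub to <8 x i16>
  ret <8 x i16> %res
}

After the combine the DAG is morally sub(%x, trunc(%y)) in <8 x i16>; this is safe because truncation distributes over sub.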
There are still regressions in test14/test15/test16, where it
looks like we were trying to set up cases we could match to
umin+trunc+subus, but the handling was never finished. The
regression here isn't unique to sub. It's a lost opportunity for
taking an AND with two truncated inputs and producing a larger
AND with a single truncate. The same thing could happen with
any other node we handle in combineTruncatedArithmetic, since we
are moving the truncate up the DAG.
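Sketched in IR (again hypothetical, for illustration only), the lost opportunity is that

  %ta = trunc <8 x i32> %a to <8 x i16>
  %tb = trunc <8 x i32> %b to <8 x i16>
  %r  = and <8 x i16> %ta, %tb

could instead be emitted as a single wider AND with one truncate:

  %w = and <8 x i32> %a, %b
  %r = trunc <8 x i32> %w to <8 x i16>

since truncation distributes over AND just as it does over sub.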
Differential Revision: https://reviews.llvm.org/D80483
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/psubus.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5101977a68ed..54a80151eb69 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -43588,21 +43588,12 @@ static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
case ISD::AND:
case ISD::XOR:
case ISD::OR:
- case ISD::ADD: {
- SDValue Op0 = Src.getOperand(0);
- SDValue Op1 = Src.getOperand(1);
- if (TLI.isOperationLegal(SrcOpcode, VT) &&
- (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
- return TruncateArithmetic(Op0, Op1);
- break;
- }
+ case ISD::ADD:
case ISD::SUB: {
- // TODO: ISD::SUB We are conservative and require both sides to be freely
- // truncatable to avoid interfering with combineSubToSubus.
SDValue Op0 = Src.getOperand(0);
SDValue Op1 = Src.getOperand(1);
if (TLI.isOperationLegal(SrcOpcode, VT) &&
- (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
+ (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
return TruncateArithmetic(Op0, Op1);
break;
}
@@ -46698,6 +46689,38 @@ static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
SubusRHS = MinLHS;
else
return SDValue();
+ } else if (Op1.getOpcode() == ISD::TRUNCATE &&
+ Op1.getOperand(0).getOpcode() == ISD::UMIN &&
+ (EltVT == MVT::i8 || EltVT == MVT::i16)) {
+ // Special case where the UMIN has been truncated. Try to push the truncate
+ // further up. This is similar to the i32/i64 special processing.
+ SubusLHS = Op0;
+ SDValue MinLHS = Op1.getOperand(0).getOperand(0);
+ SDValue MinRHS = Op1.getOperand(0).getOperand(1);
+ EVT TruncVT = Op1.getOperand(0).getValueType();
+ if (!(Subtarget.hasSSSE3() && (TruncVT == MVT::v8i32 ||
+ TruncVT == MVT::v8i64)) &&
+ !(Subtarget.useBWIRegs() && (TruncVT == MVT::v16i32)))
+ return SDValue();
+ SDValue OpToSaturate;
+ if (MinLHS.getOpcode() == ISD::ZERO_EXTEND &&
+ MinLHS.getOperand(0) == Op0)
+ OpToSaturate = MinRHS;
+ else if (MinRHS.getOpcode() == ISD::ZERO_EXTEND &&
+ MinRHS.getOperand(0) == Op0)
+ OpToSaturate = MinLHS;
+ else
+ return SDValue();
+
+ // Saturate the non-extended input and then truncate it.
+ SDLoc DL(N);
+ SDValue SaturationConst =
+ DAG.getConstant(APInt::getLowBitsSet(TruncVT.getScalarSizeInBits(),
+ VT.getScalarSizeInBits()),
+ DL, TruncVT);
+ SDValue UMin = DAG.getNode(ISD::UMIN, DL, TruncVT, OpToSaturate,
+ SaturationConst);
+ SubusRHS = DAG.getNode(ISD::TRUNCATE, DL, VT, UMin);
} else
return SDValue();
diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index 21c63da1d1ee..a51893ade021 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -465,33 +465,33 @@ vector.ph:
ret <32 x i8> %res
}
+; FIXME: match this to UMIN+TRUNC+PSUBUS
define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: test13:
; SSE2: # %bb.0: # %vector.ph
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psubd %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm6
-; SSE2-NEXT: por %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
-; SSE2-NEXT: packssdw %xmm6, %xmm2
-; SSE2-NEXT: psubd %xmm1, %xmm3
-; SSE2-NEXT: pslld $16, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pslld $16, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm0, %xmm3
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: pxor %xmm6, %xmm7
+; SSE2-NEXT: por %xmm6, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm6, %xmm3
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
+; SSE2-NEXT: packssdw %xmm7, %xmm3
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test13:
@@ -499,98 +499,96 @@ define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
-; SSSE3-NEXT: psubd %xmm2, %xmm0
-; SSSE3-NEXT: movdqa %xmm2, %xmm6
-; SSSE3-NEXT: pxor %xmm3, %xmm6
-; SSSE3-NEXT: por %xmm3, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm6
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pxor %xmm3, %xmm2
-; SSSE3-NEXT: por %xmm4, %xmm3
-; SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
-; SSSE3-NEXT: packssdw %xmm6, %xmm2
-; SSSE3-NEXT: psubd %xmm1, %xmm4
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
-; SSSE3-NEXT: pshufb %xmm1, %xmm4
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
-; SSSE3-NEXT: pandn %xmm4, %xmm2
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm2, %xmm7
+; SSSE3-NEXT: pxor %xmm6, %xmm7
+; SSSE3-NEXT: por %xmm6, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pxor %xmm6, %xmm3
+; SSSE3-NEXT: por %xmm6, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm3
+; SSSE3-NEXT: packssdw %xmm7, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm4, %xmm2
+; SSSE3-NEXT: pshufb %xmm4, %xmm1
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT: psubw %xmm1, %xmm0
+; SSSE3-NEXT: pandn %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test13:
; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pmaxud %xmm1, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm5, %xmm3
+; SSE41-NEXT: pmaxud %xmm1, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm3
; SSE41-NEXT: pcmpeqd %xmm5, %xmm5
-; SSE41-NEXT: pxor %xmm5, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm6
+; SSE41-NEXT: pxor %xmm5, %xmm3
+; SSE41-NEXT: movdqa %xmm4, %xmm6
; SSE41-NEXT: pmaxud %xmm2, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm6
; SSE41-NEXT: pxor %xmm5, %xmm6
-; SSE41-NEXT: packssdw %xmm6, %xmm0
-; SSE41-NEXT: psubd %xmm2, %xmm3
-; SSE41-NEXT: psubd %xmm1, %xmm4
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
-; SSE41-NEXT: packusdw %xmm3, %xmm4
-; SSE41-NEXT: pandn %xmm4, %xmm0
+; SSE41-NEXT: packssdw %xmm6, %xmm3
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE41-NEXT: packusdw %xmm2, %xmm1
+; SSE41-NEXT: psubw %xmm1, %xmm0
+; SSE41-NEXT: pandn %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: test13:
; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpmaxud %xmm4, %xmm2, %xmm5
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm6
-; AVX1-NEXT: vpcmpeqd %xmm6, %xmm0, %xmm6
-; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT: vpandn %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmaxud %xmm1, %xmm3, %xmm6
+; AVX1-NEXT: vpcmpeqd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackssdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test13:
; AVX2: # %bb.0: # %vector.ph
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmaxud %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpmaxud %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpsubw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test13:
; AVX512: # %bb.0: # %vector.ph
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT: vpcmpnltud %ymm1, %ymm0, %k1
-; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %ymm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: vpcmpnltud %ymm1, %ymm2, %k1
+; AVX512-NEXT: vpmovdw %ymm1, %xmm1
+; AVX512-NEXT: vpsubw %xmm1, %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
vector.ph:
@@ -602,186 +600,172 @@ vector.ph:
ret <8 x i16> %res
}
+; FIXME: match this to UMIN+TRUNC+PSUBUS
define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; SSE2-LABEL: test14:
; SSE2: # %bb.0: # %vector.ph
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm10
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: movdqa %xmm4, %xmm9
-; SSE2-NEXT: pxor %xmm0, %xmm9
-; SSE2-NEXT: psubd %xmm5, %xmm4
-; SSE2-NEXT: por %xmm0, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm5
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: pxor %xmm0, %xmm7
-; SSE2-NEXT: psubd %xmm10, %xmm3
-; SSE2-NEXT: por %xmm0, %xmm10
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm10
-; SSE2-NEXT: packssdw %xmm5, %xmm10
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm5
-; SSE2-NEXT: psubd %xmm6, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm5
-; SSE2-NEXT: por %xmm8, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm0
-; SSE2-NEXT: packssdw %xmm6, %xmm0
-; SSE2-NEXT: packsswb %xmm10, %xmm0
-; SSE2-NEXT: psubd %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm10
+; SSE2-NEXT: movdqa %xmm2, %xmm7
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: packuswb %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pand %xmm5, %xmm2
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: psubb %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm5, %xmm9
+; SSE2-NEXT: por %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm6
+; SSE2-NEXT: pxor %xmm5, %xmm10
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE2-NEXT: packssdw %xmm6, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm7
+; SSE2-NEXT: por %xmm5, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm2
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm0
+; SSE2-NEXT: packssdw %xmm2, %xmm0
+; SSE2-NEXT: packsswb %xmm3, %xmm0
; SSE2-NEXT: pandn %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test14:
; SSSE3: # %bb.0: # %vector.ph
-; SSSE3-NEXT: movdqa %xmm0, %xmm5
-; SSSE3-NEXT: pxor %xmm0, %xmm0
-; SSSE3-NEXT: movdqa %xmm5, %xmm6
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSSE3-NEXT: movdqa %xmm6, %xmm8
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSSE3-NEXT: movdqa %xmm5, %xmm10
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: pxor %xmm8, %xmm8
+; SSSE3-NEXT: movdqa %xmm0, %xmm6
; SSSE3-NEXT: movdqa %xmm4, %xmm9
-; SSSE3-NEXT: pxor %xmm0, %xmm9
-; SSSE3-NEXT: psubd %xmm5, %xmm4
-; SSSE3-NEXT: por %xmm0, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm9, %xmm5
-; SSSE3-NEXT: movdqa %xmm3, %xmm7
-; SSSE3-NEXT: pxor %xmm0, %xmm7
-; SSSE3-NEXT: psubd %xmm10, %xmm3
-; SSSE3-NEXT: por %xmm0, %xmm10
-; SSSE3-NEXT: pcmpgtd %xmm7, %xmm10
-; SSSE3-NEXT: packssdw %xmm5, %xmm10
-; SSSE3-NEXT: movdqa %xmm2, %xmm5
-; SSSE3-NEXT: pxor %xmm0, %xmm5
-; SSSE3-NEXT: psubd %xmm6, %xmm2
-; SSSE3-NEXT: por %xmm0, %xmm6
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm6
-; SSSE3-NEXT: movdqa %xmm1, %xmm5
-; SSSE3-NEXT: pxor %xmm0, %xmm5
-; SSSE3-NEXT: por %xmm8, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm0
-; SSSE3-NEXT: packssdw %xmm6, %xmm0
-; SSSE3-NEXT: packsswb %xmm10, %xmm0
-; SSSE3-NEXT: psubd %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm10
+; SSSE3-NEXT: movdqa %xmm2, %xmm7
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSSE3-NEXT: pand %xmm5, %xmm4
; SSSE3-NEXT: pand %xmm5, %xmm3
; SSSE3-NEXT: packuswb %xmm4, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
; SSSE3-NEXT: pand %xmm5, %xmm2
; SSSE3-NEXT: pand %xmm5, %xmm1
; SSSE3-NEXT: packuswb %xmm2, %xmm1
; SSSE3-NEXT: packuswb %xmm3, %xmm1
+; SSSE3-NEXT: psubb %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
+; SSSE3-NEXT: movdqa %xmm6, %xmm3
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: pxor %xmm5, %xmm9
+; SSSE3-NEXT: por %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm6
+; SSSE3-NEXT: pxor %xmm5, %xmm10
+; SSSE3-NEXT: por %xmm5, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm3
+; SSSE3-NEXT: packssdw %xmm6, %xmm3
+; SSSE3-NEXT: pxor %xmm5, %xmm7
+; SSSE3-NEXT: por %xmm5, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm2
+; SSSE3-NEXT: pxor %xmm5, %xmm4
+; SSSE3-NEXT: por %xmm5, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm0
+; SSSE3-NEXT: packssdw %xmm2, %xmm0
+; SSSE3-NEXT: packsswb %xmm3, %xmm0
; SSSE3-NEXT: pandn %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test14:
; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm11 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm10 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pmaxud %xmm10, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm6, %xmm6
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm7
-; SSE41-NEXT: pmaxud %xmm9, %xmm7
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[3,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
+; SSE41-NEXT: pmaxud %xmm4, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm9
+; SSE41-NEXT: pxor %xmm9, %xmm6
+; SSE41-NEXT: pmaxud %xmm3, %xmm7
; SSE41-NEXT: pcmpeqd %xmm3, %xmm7
-; SSE41-NEXT: pxor %xmm6, %xmm7
-; SSE41-NEXT: packssdw %xmm0, %xmm7
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pmaxud %xmm8, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm5
-; SSE41-NEXT: pmaxud %xmm11, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm5
-; SSE41-NEXT: pxor %xmm6, %xmm5
-; SSE41-NEXT: packssdw %xmm5, %xmm0
-; SSE41-NEXT: packsswb %xmm7, %xmm0
-; SSE41-NEXT: psubd %xmm11, %xmm2
-; SSE41-NEXT: psubd %xmm8, %xmm1
-; SSE41-NEXT: psubd %xmm9, %xmm3
-; SSE41-NEXT: psubd %xmm10, %xmm4
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE41-NEXT: pand %xmm5, %xmm4
-; SSE41-NEXT: pand %xmm5, %xmm3
+; SSE41-NEXT: pxor %xmm9, %xmm7
+; SSE41-NEXT: packssdw %xmm6, %xmm7
+; SSE41-NEXT: pmaxud %xmm1, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm5
+; SSE41-NEXT: pxor %xmm9, %xmm5
+; SSE41-NEXT: pmaxud %xmm2, %xmm8
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm8
+; SSE41-NEXT: pxor %xmm9, %xmm8
+; SSE41-NEXT: packssdw %xmm8, %xmm5
+; SSE41-NEXT: packsswb %xmm7, %xmm5
+; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE41-NEXT: pand %xmm6, %xmm4
+; SSE41-NEXT: pand %xmm6, %xmm3
; SSE41-NEXT: packusdw %xmm4, %xmm3
-; SSE41-NEXT: pand %xmm5, %xmm1
-; SSE41-NEXT: pand %xmm5, %xmm2
+; SSE41-NEXT: pand %xmm6, %xmm2
+; SSE41-NEXT: pand %xmm6, %xmm1
; SSE41-NEXT: packusdw %xmm2, %xmm1
; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: pandn %xmm1, %xmm0
+; SSE41-NEXT: psubb %xmm0, %xmm1
+; SSE41-NEXT: pandn %xmm1, %xmm5
+; SSE41-NEXT: movdqa %xmm5, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: test14:
; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm11 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,0,1]
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
-; AVX1-NEXT: vpmaxud %xmm0, %xmm6, %xmm7
-; AVX1-NEXT: vpcmpeqd %xmm7, %xmm6, %xmm7
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpxor %xmm3, %xmm7, %xmm7
-; AVX1-NEXT: vpmaxud %xmm11, %xmm2, %xmm4
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm4
-; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm4
-; AVX1-NEXT: vpackssdw %xmm7, %xmm4, %xmm10
-; AVX1-NEXT: vpmaxud %xmm9, %xmm1, %xmm7
-; AVX1-NEXT: vpcmpeqd %xmm7, %xmm1, %xmm7
-; AVX1-NEXT: vpxor %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[3,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT: vpmaxud %xmm6, %xmm7, %xmm6
+; AVX1-NEXT: vpcmpeqd %xmm6, %xmm7, %xmm6
+; AVX1-NEXT: vpcmpeqd %xmm7, %xmm7, %xmm7
+; AVX1-NEXT: vpxor %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpmaxud %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm2, %xmm5
+; AVX1-NEXT: vpxor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpackssdw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpmaxud %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpxor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpmaxud %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpxor %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpackssdw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpacksswb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vandps %ymm4, %ymm2, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpackusdw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpmaxud %xmm8, %xmm4, %xmm5
-; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm5
-; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpackssdw %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpacksswb %xmm10, %xmm3, %xmm3
-; AVX1-NEXT: vpsubd %xmm8, %xmm4, %xmm4
-; AVX1-NEXT: vpsubd %xmm9, %xmm1, %xmm1
-; AVX1-NEXT: vpsubd %xmm11, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,255,255,255]
-; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm2
-; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpandn %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -790,40 +774,39 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
; AVX2: # %bb.0: # %vector.ph
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT: vpmaxud %ymm0, %ymm1, %ymm4
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vpmaxud %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm5, %ymm5
; AVX2-NEXT: vpxor %ymm5, %ymm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6
; AVX2-NEXT: vpackssdw %xmm6, %xmm4, %xmm4
-; AVX2-NEXT: vpmaxud %ymm3, %ymm2, %ymm6
-; AVX2-NEXT: vpcmpeqd %ymm6, %ymm2, %ymm6
-; AVX2-NEXT: vpxor %ymm5, %ymm6, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
-; AVX2-NEXT: vpackssdw %xmm6, %xmm5, %xmm5
-; AVX2-NEXT: vpacksswb %xmm5, %xmm4, %xmm4
-; AVX2-NEXT: vpsubd %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpmaxud %ymm3, %ymm2, %ymm3
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm3
+; AVX2-NEXT: vpxor %ymm5, %ymm3, %ymm3
+; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm5
+; AVX2-NEXT: vpackssdw %xmm5, %xmm3, %xmm3
+; AVX2-NEXT: vpacksswb %xmm3, %xmm4, %xmm3
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpandn %xmm0, %xmm4, %xmm0
+; AVX2-NEXT: vpand %xmm5, %xmm1, %xmm1
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpandn %xmm0, %xmm3, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test14:
; AVX512: # %bb.0: # %vector.ph
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512-NEXT: vpcmpnltud %zmm0, %zmm1, %k1
-; AVX512-NEXT: vpsubd %zmm0, %zmm1, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpcmpnltud %zmm2, %zmm1, %k1
+; AVX512-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512-NEXT: vpsubb %xmm0, %xmm1, %xmm0 {%k1} {z}
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
vector.ph:
@@ -835,123 +818,127 @@ vector.ph:
ret <16 x i8> %res
}
+; FIXME: match this to UMIN+TRUNC+PSUBUS
define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: test15:
; SSE2: # %bb.0: # %vector.ph
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: psubd %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
; SSE2-NEXT: packssdw %xmm5, %xmm4
-; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pslld $16, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: pslld $16, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm3, %xmm0
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test15:
; SSSE3: # %bb.0: # %vector.ph
-; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
-; SSSE3-NEXT: psubd %xmm2, %xmm0
-; SSSE3-NEXT: pxor %xmm4, %xmm2
-; SSSE3-NEXT: por %xmm4, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm2, %xmm6
+; SSSE3-NEXT: pxor %xmm3, %xmm6
+; SSSE3-NEXT: por %xmm3, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm5
+; SSSE3-NEXT: movdqa %xmm1, %xmm6
+; SSSE3-NEXT: pxor %xmm3, %xmm6
; SSSE3-NEXT: por %xmm3, %xmm4
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm4
; SSSE3-NEXT: packssdw %xmm5, %xmm4
-; SSSE3-NEXT: psubd %xmm1, %xmm3
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
-; SSSE3-NEXT: pshufb %xmm1, %xmm3
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSSE3-NEXT: pand %xmm4, %xmm3
-; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm3, %xmm2
+; SSSE3-NEXT: pshufb %xmm3, %xmm1
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT: psubw %xmm1, %xmm0
+; SSSE3-NEXT: pand %xmm4, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test15:
; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pminud %xmm1, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm5, %xmm5
-; SSE41-NEXT: pxor %xmm5, %xmm4
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm4, %xmm5
+; SSE41-NEXT: pminud %xmm1, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE41-NEXT: pxor %xmm4, %xmm5
; SSE41-NEXT: movdqa %xmm3, %xmm6
; SSE41-NEXT: pminud %xmm2, %xmm6
; SSE41-NEXT: pcmpeqd %xmm3, %xmm6
-; SSE41-NEXT: pxor %xmm5, %xmm6
-; SSE41-NEXT: packssdw %xmm6, %xmm4
-; SSE41-NEXT: psubd %xmm2, %xmm3
-; SSE41-NEXT: psubd %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
-; SSE41-NEXT: packusdw %xmm3, %xmm0
-; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: pxor %xmm4, %xmm6
+; SSE41-NEXT: packssdw %xmm6, %xmm5
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
+; SSE41-NEXT: packusdw %xmm2, %xmm1
+; SSE41-NEXT: psubw %xmm1, %xmm0
+; SSE41-NEXT: pand %xmm5, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: test15:
; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm4
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm5
-; AVX1-NEXT: vpcmpeqd %xmm5, %xmm0, %xmm5
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpandn %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
-; AVX1-NEXT: vpandn %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpminud %xmm4, %xmm2, %xmm5
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm1, %xmm3, %xmm6
+; AVX1-NEXT: vpcmpeqd %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackssdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test15:
; AVX2: # %bb.0: # %vector.ph
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpminud %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test15:
; AVX512: # %bb.0: # %vector.ph
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT: vpcmpnleud %ymm1, %ymm0, %k1
-; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %ymm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: vpcmpnleud %ymm1, %ymm2, %k1
+; AVX512-NEXT: vpmovdw %ymm1, %xmm1
+; AVX512-NEXT: vpsubw %xmm1, %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
vector.ph:
@@ -963,80 +950,77 @@ vector.ph:
ret <8 x i16> %res
}
+; FIXME: match this to UMIN+TRUNC+PSUBUS
define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: test16:
; SSE2: # %bb.0: # %vector.ph
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: psubd %xmm2, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: por %xmm0, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
; SSE2-NEXT: packssdw %xmm5, %xmm4
-; SSE2-NEXT: psubd %xmm1, %xmm0
-; SSE2-NEXT: pslld $16, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: pslld $16, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm3, %xmm0
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: test16:
; SSSE3: # %bb.0: # %vector.ph
-; SSSE3-NEXT: pxor %xmm4, %xmm4
-; SSSE3-NEXT: movdqa %xmm0, %xmm3
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
-; SSSE3-NEXT: psubd %xmm2, %xmm0
-; SSSE3-NEXT: pxor %xmm4, %xmm2
-; SSSE3-NEXT: por %xmm4, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm2, %xmm6
+; SSSE3-NEXT: pxor %xmm3, %xmm6
+; SSSE3-NEXT: por %xmm3, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm5
+; SSSE3-NEXT: movdqa %xmm1, %xmm6
+; SSSE3-NEXT: pxor %xmm3, %xmm6
; SSSE3-NEXT: por %xmm3, %xmm4
-; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm4
; SSSE3-NEXT: packssdw %xmm5, %xmm4
-; SSSE3-NEXT: psubd %xmm1, %xmm3
-; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
-; SSSE3-NEXT: pshufb %xmm1, %xmm3
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSSE3-NEXT: pand %xmm4, %xmm3
-; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm3, %xmm2
+; SSSE3-NEXT: pshufb %xmm3, %xmm1
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT: psubw %xmm1, %xmm0
+; SSSE3-NEXT: pand %xmm4, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: test16:
; SSE41: # %bb.0: # %vector.ph
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pminud %xmm1, %xmm4
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pmaxud %xmm1, %xmm4
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm4
; SSE41-NEXT: pcmpeqd %xmm5, %xmm5
; SSE41-NEXT: pxor %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: pminud %xmm2, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm6
-; SSE41-NEXT: pxor %xmm5, %xmm6
-; SSE41-NEXT: packssdw %xmm6, %xmm4
-; SSE41-NEXT: psubd %xmm2, %xmm3
-; SSE41-NEXT: psubd %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
-; SSE41-NEXT: packusdw %xmm3, %xmm0
+; SSE41-NEXT: pmaxud %xmm2, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
+; SSE41-NEXT: pxor %xmm5, %xmm3
+; SSE41-NEXT: packssdw %xmm3, %xmm4
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
+; SSE41-NEXT: packusdw %xmm2, %xmm1
+; SSE41-NEXT: psubw %xmm1, %xmm0
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: retq
;
@@ -1044,42 +1028,47 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm4
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm5
-; AVX1-NEXT: vpcmpeqd %xmm5, %xmm0, %xmm5
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpandn %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
-; AVX1-NEXT: vpandn %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpmaxud %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpmaxud %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackssdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test16:
; AVX2: # %bb.0: # %vector.ph
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpmaxud %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test16:
; AVX512: # %bb.0: # %vector.ph
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT: vpcmpnleud %ymm1, %ymm0, %k1
-; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %ymm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: vpcmpltud %ymm2, %ymm1, %k1
+; AVX512-NEXT: vpmovdw %ymm1, %xmm1
+; AVX512-NEXT: vpsubw %xmm1, %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
vector.ph:
@@ -2057,60 +2046,57 @@ vector.ph:
define <8 x i16> @psubus_i16_i32_min(<8 x i16> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: psubus_i16_i32_min:
; SSE2: # %bb.0: # %vector.ph
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm6, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: por %xmm6, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pandn %xmm1, %xmm3
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm6, %xmm1
; SSE2-NEXT: por %xmm4, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: pand %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm5
-; SSE2-NEXT: por %xmm6, %xmm5
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm2
-; SSE2-NEXT: por %xmm4, %xmm2
-; SSE2-NEXT: psubd %xmm2, %xmm3
-; SSE2-NEXT: psubd %xmm5, %xmm0
-; SSE2-NEXT: pslld $16, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm3
; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm0, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm3
+; SSE2-NEXT: psubw %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: psubus_i16_i32_min:
; SSSE3: # %bb.0: # %vector.ph
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm2, %xmm5
-; SSSE3-NEXT: pxor %xmm4, %xmm5
-; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147549183,2147549183,2147549183,2147549183]
-; SSSE3-NEXT: movdqa %xmm6, %xmm7
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
-; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535]
-; SSSE3-NEXT: pand %xmm7, %xmm2
-; SSSE3-NEXT: pandn %xmm5, %xmm7
-; SSSE3-NEXT: por %xmm2, %xmm7
-; SSSE3-NEXT: pshufb %xmm3, %xmm7
-; SSSE3-NEXT: pxor %xmm1, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
-; SSSE3-NEXT: pand %xmm6, %xmm1
-; SSSE3-NEXT: pandn %xmm5, %xmm6
-; SSSE3-NEXT: por %xmm1, %xmm6
-; SSSE3-NEXT: pshufb %xmm3, %xmm6
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
-; SSSE3-NEXT: psubusw %xmm6, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSSE3-NEXT: pand %xmm6, %xmm2
+; SSSE3-NEXT: pandn %xmm4, %xmm6
+; SSSE3-NEXT: por %xmm2, %xmm6
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm2, %xmm6
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm1
+; SSSE3-NEXT: pandn %xmm4, %xmm5
+; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: pshufb %xmm2, %xmm5
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
+; SSSE3-NEXT: psubusw %xmm5, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: psubus_i16_i32_min: