[llvm] 761bbed - [DAG] foldSubToUSubSat - fold sub(a,trunc(umin(zext(a),b))) -> usubsat(a,trunc(umin(b,SatLimit)))
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Feb 20 04:02:19 PST 2021
Author: Simon Pilgrim
Date: 2021-02-20T12:02:07Z
New Revision: 761bbed264f7f524fbf0e57d0bb285b1d6e5816e
URL: https://github.com/llvm/llvm-project/commit/761bbed264f7f524fbf0e57d0bb285b1d6e5816e
DIFF: https://github.com/llvm/llvm-project/commit/761bbed264f7f524fbf0e57d0bb285b1d6e5816e.diff
LOG: [DAG] foldSubToUSubSat - fold sub(a,trunc(umin(zext(a),b))) -> usubsat(a,trunc(umin(b,SatLimit)))
This moves the last custom x86 USUBSAT fold to generic DAGCombine.
Completes PR40111
Differential Revision: https://reviews.llvm.org/D96703
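For readers skimming the log, here is a hand-rolled IR sketch of the shape this combine matches and the saturating form it is rewritten to. It is my own illustration with made-up value names and a fixed v4i32/v4i64 width, not copied from the patch or its tests:

  declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)

  ; input shape: sub(a, trunc(umin(zext(a), b)))
  %zext  = zext <4 x i32> %a to <4 x i64>
  %cmp   = icmp ult <4 x i64> %zext, %b
  %umin  = select <4 x i1> %cmp, <4 x i64> %zext, <4 x i64> %b
  %trunc = trunc <4 x i64> %umin to <4 x i32>
  %sub   = sub <4 x i32> %a, %trunc

  ; equivalent saturating form: usubsat(a, trunc(umin(b, SatLimit)))
  ; umin(zext(a), b) never exceeds a, so the sub above cannot wrap, and
  ; clamping b to the i32 range (SatLimit = 0xFFFFFFFF) makes the truncate lossless
  %clamp  = icmp ult <4 x i64> %b, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %bmin   = select <4 x i1> %clamp, <4 x i64> %b, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %narrow = trunc <4 x i64> %bmin to <4 x i32>
  %sat    = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %a, <4 x i32> %narrow)

The clamp-plus-truncate sequence is, as I read it, what the getTruncatedUSUBSAT helper called from the new DAGCombiner.cpp code below is expected to emit.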
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/psubus.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index be8fd03abdcb..f42cdde860ec 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -3189,6 +3189,19 @@ SDValue DAGCombiner::foldSubToUSubSat(EVT DstVT, SDNode *N) {
return getTruncatedUSUBSAT(DstVT, SubVT, Op0, MinLHS, DAG, SDLoc(N));
}
+ // sub(a,trunc(umin(zext(a),b))) -> usubsat(a,trunc(umin(b,SatLimit)))
+ if (Op1.getOpcode() == ISD::TRUNCATE &&
+ Op1.getOperand(0).getOpcode() == ISD::UMIN) {
+ SDValue MinLHS = Op1.getOperand(0).getOperand(0);
+ SDValue MinRHS = Op1.getOperand(0).getOperand(1);
+ if (MinLHS.getOpcode() == ISD::ZERO_EXTEND && MinLHS.getOperand(0) == Op0)
+ return getTruncatedUSUBSAT(DstVT, MinLHS.getValueType(), MinLHS, MinRHS,
+ DAG, SDLoc(N));
+ if (MinRHS.getOpcode() == ISD::ZERO_EXTEND && MinRHS.getOperand(0) == Op0)
+ return getTruncatedUSUBSAT(DstVT, MinLHS.getValueType(), MinRHS, MinLHS,
+ DAG, SDLoc(N));
+ }
+
return SDValue();
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 008fbc179f5d..ea329a77ef0d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -49109,101 +49109,6 @@ static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
return combineAddOrSubToADCOrSBB(N, DAG);
}
-static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget &Subtarget) {
- SDValue Op0 = N->getOperand(0);
- SDValue Op1 = N->getOperand(1);
- EVT VT = N->getValueType(0);
-
- if (!VT.isVector())
- return SDValue();
-
- // PSUBUS is supported, starting from SSE2.
- EVT EltVT = VT.getVectorElementType();
- if (!(Subtarget.hasSSE2() &&
- (EltVT == MVT::i8 || EltVT == MVT::i16 || VT == MVT::v8i32 ||
- VT == MVT::v8i64 || VT == MVT::v16i32)))
- return SDValue();
-
- SDValue SubusLHS, SubusRHS;
- if (Op1.getOpcode() == ISD::TRUNCATE &&
- Op1.getOperand(0).getOpcode() == ISD::UMIN &&
- (EltVT == MVT::i8 || EltVT == MVT::i16)) {
- // Special case where the UMIN has been truncated. Try to push the truncate
- // further up. This is similar to the i32/i64 special processing.
- SubusLHS = Op0;
- SDValue MinLHS = Op1.getOperand(0).getOperand(0);
- SDValue MinRHS = Op1.getOperand(0).getOperand(1);
- EVT TruncVT = Op1.getOperand(0).getValueType();
- if (!(Subtarget.hasSSE2() &&
- (TruncVT == MVT::v8i32 || TruncVT == MVT::v8i64 ||
- TruncVT == MVT::v16i32)))
- return SDValue();
- SDValue OpToSaturate;
- if (MinLHS.getOpcode() == ISD::ZERO_EXTEND &&
- MinLHS.getOperand(0) == Op0)
- OpToSaturate = MinRHS;
- else if (MinRHS.getOpcode() == ISD::ZERO_EXTEND &&
- MinRHS.getOperand(0) == Op0)
- OpToSaturate = MinLHS;
- else
- return SDValue();
-
- // Saturate the non-extended input and then truncate it.
- SDLoc DL(N);
- SDValue SaturationConst =
- DAG.getConstant(APInt::getLowBitsSet(TruncVT.getScalarSizeInBits(),
- VT.getScalarSizeInBits()),
- DL, TruncVT);
- SDValue UMin = DAG.getNode(ISD::UMIN, DL, TruncVT, OpToSaturate,
- SaturationConst);
- SubusRHS = DAG.getNode(ISD::TRUNCATE, DL, VT, UMin);
- } else
- return SDValue();
-
- // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
- // special preprocessing in some cases.
- if (EltVT == MVT::i8 || EltVT == MVT::i16)
- return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);
-
- assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
- "Unexpected VT!");
-
- // Special preprocessing case can be only applied
- // if the value was zero extended from 16 bit,
- // so we require first 16 bits to be zeros for 32 bit
- // values, or first 48 bits for 64 bit values.
- KnownBits Known = DAG.computeKnownBits(SubusLHS);
- unsigned NumZeros = Known.countMinLeadingZeros();
- if (NumZeros < (VT.getScalarSizeInBits() - 16))
- return SDValue();
-
- EVT ExtType = SubusLHS.getValueType();
- EVT ShrinkedType;
- if (VT == MVT::v8i32 || VT == MVT::v8i64)
- ShrinkedType = MVT::v8i16;
- else
- ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
-
- // If SubusLHS is zeroextended - truncate SubusRHS to it's
- // size SubusRHS = umin(0xFFF.., SubusRHS).
- SDValue SaturationConst =
- DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
- ShrinkedType.getScalarSizeInBits()),
- SDLoc(SubusLHS), ExtType);
- SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
- SaturationConst);
- SDValue NewSubusLHS =
- DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
- SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
- SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
- NewSubusLHS, NewSubusRHS);
-
- // Zero extend the result, it may be used somewhere as 32 bit,
- // if not zext and following trunc will shrink.
- return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
-}
-
static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
@@ -49232,10 +49137,6 @@ static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
if (SDValue V = combineAddOrSubToHADDorHSUB(N, DAG, Subtarget))
return V;
- // Try to create PSUBUS if SUB's argument is max/min
- if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
- return V;
-
return combineAddOrSubToADCOrSBB(N, DAG);
}
diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index d2d8176838fc..7c23305d6724 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -2496,222 +2496,203 @@ define <8 x i16> @test32(<8 x i16> %a0, <8 x i32> %a1) {
define <8 x i32> @test33(<8 x i32> %a0, <8 x i64> %a1) {
; SSE2OR3-LABEL: test33:
; SSE2OR3: # %bb.0:
-; SSE2OR3-NEXT: pxor %xmm7, %xmm7
-; SSE2OR3-NEXT: movdqa %xmm1, %xmm8
-; SSE2OR3-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
-; SSE2OR3-NEXT: movdqa %xmm1, %xmm9
-; SSE2OR3-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
-; SSE2OR3-NEXT: movdqa %xmm0, %xmm10
-; SSE2OR3-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
-; SSE2OR3-NEXT: movdqa %xmm0, %xmm11
-; SSE2OR3-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm7[2],xmm11[3],xmm7[3]
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm12 = [9223372039002259456,9223372039002259456]
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
; SSE2OR3-NEXT: movdqa %xmm3, %xmm6
-; SSE2OR3-NEXT: pxor %xmm12, %xmm6
-; SSE2OR3-NEXT: movdqa %xmm11, %xmm7
-; SSE2OR3-NEXT: pxor %xmm12, %xmm7
-; SSE2OR3-NEXT: movdqa %xmm7, %xmm13
-; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm13
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm6, %xmm7
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm14, %xmm6
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm13[1,1,3,3]
+; SSE2OR3-NEXT: pxor %xmm9, %xmm6
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259455,9223372039002259455]
+; SSE2OR3-NEXT: movdqa %xmm10, %xmm7
+; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm7
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm6
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm8, %xmm6
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2OR3-NEXT: por %xmm6, %xmm7
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
; SSE2OR3-NEXT: pand %xmm7, %xmm3
-; SSE2OR3-NEXT: pandn %xmm11, %xmm7
+; SSE2OR3-NEXT: pandn %xmm8, %xmm7
; SSE2OR3-NEXT: por %xmm3, %xmm7
; SSE2OR3-NEXT: movdqa %xmm2, %xmm3
-; SSE2OR3-NEXT: pxor %xmm12, %xmm3
+; SSE2OR3-NEXT: pxor %xmm9, %xmm3
; SSE2OR3-NEXT: movdqa %xmm10, %xmm6
-; SSE2OR3-NEXT: pxor %xmm12, %xmm6
-; SSE2OR3-NEXT: movdqa %xmm6, %xmm11
-; SSE2OR3-NEXT: pcmpgtd %xmm3, %xmm11
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm13 = xmm11[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm3, %xmm6
+; SSE2OR3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm11 = xmm6[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm3
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm11, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm13, %xmm6
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,1,3,3]
-; SSE2OR3-NEXT: por %xmm6, %xmm3
-; SSE2OR3-NEXT: pand %xmm3, %xmm2
-; SSE2OR3-NEXT: pandn %xmm10, %xmm3
-; SSE2OR3-NEXT: por %xmm2, %xmm3
-; SSE2OR3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
-; SSE2OR3-NEXT: movdqa %xmm5, %xmm2
-; SSE2OR3-NEXT: pxor %xmm12, %xmm2
-; SSE2OR3-NEXT: movdqa %xmm9, %xmm6
-; SSE2OR3-NEXT: pxor %xmm12, %xmm6
-; SSE2OR3-NEXT: movdqa %xmm6, %xmm7
-; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm7
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm2, %xmm6
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm10, %xmm2
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE2OR3-NEXT: por %xmm3, %xmm6
+; SSE2OR3-NEXT: pand %xmm6, %xmm2
+; SSE2OR3-NEXT: pandn %xmm8, %xmm6
; SSE2OR3-NEXT: por %xmm2, %xmm6
-; SSE2OR3-NEXT: pand %xmm6, %xmm5
-; SSE2OR3-NEXT: pandn %xmm9, %xmm6
-; SSE2OR3-NEXT: por %xmm5, %xmm6
+; SSE2OR3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
+; SSE2OR3-NEXT: movdqa %xmm0, %xmm2
+; SSE2OR3-NEXT: psubd %xmm6, %xmm2
+; SSE2OR3-NEXT: pxor %xmm9, %xmm6
+; SSE2OR3-NEXT: pxor %xmm9, %xmm0
+; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm0
+; SSE2OR3-NEXT: pand %xmm2, %xmm0
+; SSE2OR3-NEXT: movdqa %xmm5, %xmm2
+; SSE2OR3-NEXT: pxor %xmm9, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm10, %xmm3
+; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm6, %xmm2
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2OR3-NEXT: por %xmm2, %xmm3
+; SSE2OR3-NEXT: pand %xmm3, %xmm5
+; SSE2OR3-NEXT: pandn %xmm8, %xmm3
+; SSE2OR3-NEXT: por %xmm5, %xmm3
; SSE2OR3-NEXT: movdqa %xmm4, %xmm2
-; SSE2OR3-NEXT: pxor %xmm12, %xmm2
-; SSE2OR3-NEXT: pxor %xmm8, %xmm12
-; SSE2OR3-NEXT: movdqa %xmm12, %xmm5
+; SSE2OR3-NEXT: pxor %xmm9, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm10, %xmm5
; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm5
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm2, %xmm12
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm12[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm7, %xmm2
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm6, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2OR3-NEXT: por %xmm2, %xmm5
; SSE2OR3-NEXT: pand %xmm5, %xmm4
; SSE2OR3-NEXT: pandn %xmm8, %xmm5
; SSE2OR3-NEXT: por %xmm4, %xmm5
-; SSE2OR3-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2]
-; SSE2OR3-NEXT: psubd %xmm3, %xmm0
-; SSE2OR3-NEXT: psubd %xmm5, %xmm1
+; SSE2OR3-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm3[0,2]
+; SSE2OR3-NEXT: movdqa %xmm1, %xmm2
+; SSE2OR3-NEXT: psubd %xmm5, %xmm2
+; SSE2OR3-NEXT: pxor %xmm9, %xmm5
+; SSE2OR3-NEXT: pxor %xmm9, %xmm1
+; SSE2OR3-NEXT: pcmpgtd %xmm5, %xmm1
+; SSE2OR3-NEXT: pand %xmm2, %xmm1
; SSE2OR3-NEXT: retq
;
; SSE41-LABEL: test33:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm8
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm10
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE41-NEXT: movdqa %xmm8, %xmm12
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm9 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm11 = xmm8[0],zero,xmm8[1],zero
-; SSE41-NEXT: movdqa {{.*#+}} xmm14 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm3, %xmm13
-; SSE41-NEXT: pxor %xmm14, %xmm13
-; SSE41-NEXT: movdqa %xmm12, %xmm6
-; SSE41-NEXT: pxor %xmm14, %xmm6
-; SSE41-NEXT: movdqa %xmm6, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm13, %xmm0
-; SSE41-NEXT: pcmpgtd %xmm13, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
-; SSE41-NEXT: pand %xmm7, %xmm0
-; SSE41-NEXT: por %xmm6, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm12
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [9223372039002259455,9223372039002259455]
+; SSE41-NEXT: movdqa %xmm11, %xmm10
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
+; SSE41-NEXT: movdqa %xmm11, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
+; SSE41-NEXT: pand %xmm10, %xmm0
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [4294967295,4294967295]
+; SSE41-NEXT: movapd %xmm7, %xmm10
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm10
; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pxor %xmm14, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
; SSE41-NEXT: movdqa %xmm11, %xmm3
-; SSE41-NEXT: pxor %xmm14, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm11
-; SSE41-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm12[0,2]
-; SSE41-NEXT: movdqa %xmm5, %xmm0
-; SSE41-NEXT: pxor %xmm14, %xmm0
-; SSE41-NEXT: movdqa %xmm10, %xmm2
-; SSE41-NEXT: pxor %xmm14, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm11, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: por %xmm2, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm10
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pxor %xmm14, %xmm0
-; SSE41-NEXT: pxor %xmm9, %xmm14
-; SSE41-NEXT: movdqa %xmm14, %xmm2
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm10[0,2]
+; SSE41-NEXT: pmaxud %xmm3, %xmm8
+; SSE41-NEXT: psubd %xmm3, %xmm8
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm2
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm14
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,0,2,2]
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: por %xmm14, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm9
-; SSE41-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm10[0,2]
-; SSE41-NEXT: psubd %xmm11, %xmm8
-; SSE41-NEXT: psubd %xmm9, %xmm1
+; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm2
+; SSE41-NEXT: pxor %xmm4, %xmm9
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm11
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,0,2,2]
+; SSE41-NEXT: pand %xmm3, %xmm0
+; SSE41-NEXT: por %xmm11, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm7
+; SSE41-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm2[0,2]
+; SSE41-NEXT: pmaxud %xmm7, %xmm1
+; SSE41-NEXT: psubd %xmm7, %xmm1
; SSE41-NEXT: movdqa %xmm8, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: test33:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm9 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm12 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm5[0],zero,xmm5[1],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm10, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm11
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vpxor %xmm3, %xmm7, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm8, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm13
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm6
-; AVX1-NEXT: vpor %xmm3, %xmm12, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm14
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpxor %xmm3, %xmm6, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm9, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vblendvpd %xmm3, %xmm6, %xmm9, %xmm3
-; AVX1-NEXT: vblendvpd %xmm14, %xmm1, %xmm12, %xmm1
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
-; AVX1-NEXT: vblendvpd %xmm13, %xmm7, %xmm8, %xmm3
-; AVX1-NEXT: vblendvpd %xmm11, %xmm2, %xmm10, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vmovapd {{.*#+}} xmm7 = [4294967295,4294967295]
+; AVX1-NEXT: vblendvpd %xmm5, %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vblendvpd %xmm5, %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
-; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vblendvpd %xmm5, %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vblendvpd %xmm4, %xmm1, %xmm7, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: test33:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-SLOW-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-SLOW-NEXT: vpxor %ymm5, %ymm2, %ymm6
-; AVX2-SLOW-NEXT: vpor %ymm5, %ymm4, %ymm7
-; AVX2-SLOW-NEXT: vpcmpgtq %ymm6, %ymm7, %ymm6
-; AVX2-SLOW-NEXT: vblendvpd %ymm6, %ymm2, %ymm4, %ymm2
-; AVX2-SLOW-NEXT: vpxor %ymm5, %ymm1, %ymm4
-; AVX2-SLOW-NEXT: vpor %ymm5, %ymm3, %ymm5
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm2, %ymm4
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
; AVX2-SLOW-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
-; AVX2-SLOW-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
+; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm6 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-SLOW-NEXT: vblendvpd %ymm4, %ymm2, %ymm6, %ymm2
+; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm1, %ymm3
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm6, %ymm1
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm3[0,2],ymm1[4,6],ymm3[4,6]
+; AVX2-SLOW-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: test33:
; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-FAST-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-FAST-NEXT: vpxor %ymm5, %ymm2, %ymm6
-; AVX2-FAST-NEXT: vpor %ymm5, %ymm4, %ymm7
-; AVX2-FAST-NEXT: vpcmpgtq %ymm6, %ymm7, %ymm6
-; AVX2-FAST-NEXT: vblendvpd %ymm6, %ymm2, %ymm4, %ymm2
-; AVX2-FAST-NEXT: vpxor %ymm5, %ymm1, %ymm4
-; AVX2-FAST-NEXT: vpor %ymm5, %ymm3, %ymm5
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-FAST-NEXT: vpxor %ymm3, %ymm1, %ymm4
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
; AVX2-FAST-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
-; AVX2-FAST-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
-; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm3 = [0,2,4,6,4,6,6,7]
-; AVX2-FAST-NEXT: vpermps %ymm1, %ymm3, %ymm1
-; AVX2-FAST-NEXT: vpermps %ymm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm6 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-FAST-NEXT: vblendvpd %ymm4, %ymm1, %ymm6, %ymm1
+; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vpxor %ymm3, %ymm2, %ymm3
+; AVX2-FAST-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm2, %ymm6, %ymm2
+; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm2
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: test33:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: vpminuq %zmm2, %zmm1, %zmm1
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vpmovusqd %zmm1, %ymm1
+; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%zext = zext <8 x i32> %a0 to <8 x i64>
@@ -2727,209 +2708,214 @@ define <8 x i32> @test34(<8 x i32> %a0, <8 x i64> %a1) {
; SSE2OR3-LABEL: test34:
; SSE2OR3: # %bb.0:
; SSE2OR3-NEXT: movdqa {{.*#+}} xmm6 = [1,1,1,1]
-; SSE2OR3-NEXT: pand %xmm6, %xmm0
; SSE2OR3-NEXT: pand %xmm6, %xmm1
-; SSE2OR3-NEXT: pxor %xmm7, %xmm7
-; SSE2OR3-NEXT: movdqa %xmm1, %xmm8
-; SSE2OR3-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
-; SSE2OR3-NEXT: movdqa %xmm1, %xmm9
-; SSE2OR3-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
-; SSE2OR3-NEXT: movdqa %xmm0, %xmm10
-; SSE2OR3-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
-; SSE2OR3-NEXT: movdqa %xmm0, %xmm12
-; SSE2OR3-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm7[2],xmm12[3],xmm7[3]
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm11 = [9223372039002259456,9223372039002259456]
-; SSE2OR3-NEXT: movdqa %xmm3, %xmm7
-; SSE2OR3-NEXT: pxor %xmm11, %xmm7
-; SSE2OR3-NEXT: movdqa %xmm12, %xmm6
-; SSE2OR3-NEXT: por %xmm11, %xmm6
-; SSE2OR3-NEXT: movdqa %xmm7, %xmm13
-; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm13
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm14 = xmm13[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm7, %xmm6
+; SSE2OR3-NEXT: pand %xmm6, %xmm0
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
+; SSE2OR3-NEXT: movdqa %xmm3, %xmm6
+; SSE2OR3-NEXT: pxor %xmm9, %xmm6
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259455,9223372039002259455]
+; SSE2OR3-NEXT: movdqa %xmm10, %xmm7
+; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm7
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm6
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm14, %xmm6
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm13[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm8, %xmm6
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2OR3-NEXT: por %xmm6, %xmm7
-; SSE2OR3-NEXT: pand %xmm7, %xmm12
-; SSE2OR3-NEXT: pandn %xmm3, %xmm7
-; SSE2OR3-NEXT: por %xmm12, %xmm7
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSE2OR3-NEXT: pand %xmm7, %xmm3
+; SSE2OR3-NEXT: pandn %xmm8, %xmm7
+; SSE2OR3-NEXT: por %xmm3, %xmm7
; SSE2OR3-NEXT: movdqa %xmm2, %xmm3
-; SSE2OR3-NEXT: pxor %xmm11, %xmm3
+; SSE2OR3-NEXT: pxor %xmm9, %xmm3
; SSE2OR3-NEXT: movdqa %xmm10, %xmm6
-; SSE2OR3-NEXT: por %xmm11, %xmm6
-; SSE2OR3-NEXT: movdqa %xmm3, %xmm12
-; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm12
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm3, %xmm6
+; SSE2OR3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm11 = xmm6[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm3
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm11, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm13, %xmm6
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm12[1,1,3,3]
-; SSE2OR3-NEXT: por %xmm6, %xmm3
-; SSE2OR3-NEXT: pand %xmm3, %xmm10
-; SSE2OR3-NEXT: pandn %xmm2, %xmm3
-; SSE2OR3-NEXT: por %xmm10, %xmm3
-; SSE2OR3-NEXT: packuswb %xmm7, %xmm3
-; SSE2OR3-NEXT: movdqa %xmm5, %xmm2
-; SSE2OR3-NEXT: pxor %xmm11, %xmm2
-; SSE2OR3-NEXT: movdqa %xmm9, %xmm6
-; SSE2OR3-NEXT: por %xmm11, %xmm6
-; SSE2OR3-NEXT: movdqa %xmm2, %xmm7
-; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm7
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm2, %xmm6
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm10, %xmm2
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE2OR3-NEXT: por %xmm3, %xmm6
+; SSE2OR3-NEXT: pand %xmm6, %xmm2
+; SSE2OR3-NEXT: pandn %xmm8, %xmm6
; SSE2OR3-NEXT: por %xmm2, %xmm6
-; SSE2OR3-NEXT: pand %xmm6, %xmm9
-; SSE2OR3-NEXT: pandn %xmm5, %xmm6
-; SSE2OR3-NEXT: por %xmm9, %xmm6
+; SSE2OR3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
+; SSE2OR3-NEXT: movdqa %xmm0, %xmm2
+; SSE2OR3-NEXT: psubd %xmm6, %xmm2
+; SSE2OR3-NEXT: pxor %xmm9, %xmm6
+; SSE2OR3-NEXT: por %xmm9, %xmm0
+; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm0
+; SSE2OR3-NEXT: pand %xmm2, %xmm0
+; SSE2OR3-NEXT: movdqa %xmm5, %xmm2
+; SSE2OR3-NEXT: pxor %xmm9, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm10, %xmm3
+; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm6, %xmm2
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2OR3-NEXT: por %xmm2, %xmm3
+; SSE2OR3-NEXT: pand %xmm3, %xmm5
+; SSE2OR3-NEXT: pandn %xmm8, %xmm3
+; SSE2OR3-NEXT: por %xmm5, %xmm3
; SSE2OR3-NEXT: movdqa %xmm4, %xmm2
-; SSE2OR3-NEXT: pxor %xmm11, %xmm2
-; SSE2OR3-NEXT: por %xmm8, %xmm11
-; SSE2OR3-NEXT: movdqa %xmm2, %xmm5
-; SSE2OR3-NEXT: pcmpgtd %xmm11, %xmm5
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm2, %xmm11
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm11[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm7, %xmm2
+; SSE2OR3-NEXT: pxor %xmm9, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm10, %xmm5
+; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm5
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2OR3-NEXT: pand %xmm6, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2OR3-NEXT: por %xmm2, %xmm5
-; SSE2OR3-NEXT: pand %xmm5, %xmm8
-; SSE2OR3-NEXT: pandn %xmm4, %xmm5
-; SSE2OR3-NEXT: por %xmm8, %xmm5
-; SSE2OR3-NEXT: packuswb %xmm6, %xmm5
-; SSE2OR3-NEXT: psubd %xmm3, %xmm0
-; SSE2OR3-NEXT: psubd %xmm5, %xmm1
+; SSE2OR3-NEXT: pand %xmm5, %xmm4
+; SSE2OR3-NEXT: pandn %xmm8, %xmm5
+; SSE2OR3-NEXT: por %xmm4, %xmm5
+; SSE2OR3-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm3[0,2]
+; SSE2OR3-NEXT: movdqa %xmm1, %xmm2
+; SSE2OR3-NEXT: psubd %xmm5, %xmm2
+; SSE2OR3-NEXT: pxor %xmm9, %xmm5
+; SSE2OR3-NEXT: por %xmm9, %xmm1
+; SSE2OR3-NEXT: pcmpgtd %xmm5, %xmm1
+; SSE2OR3-NEXT: pand %xmm2, %xmm1
; SSE2OR3-NEXT: retq
;
; SSE41-LABEL: test34:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm11
+; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE41-NEXT: pand %xmm0, %xmm11
; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm9
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm10 = xmm11[0],zero,xmm11[1],zero
-; SSE41-NEXT: movdqa %xmm11, %xmm12
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: pxor %xmm7, %xmm6
-; SSE41-NEXT: movdqa %xmm12, %xmm0
-; SSE41-NEXT: por %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm13
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm13
-; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
-; SSE41-NEXT: pand %xmm13, %xmm0
-; SSE41-NEXT: por %xmm6, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm12, %xmm3
-; SSE41-NEXT: movdqa %xmm2, %xmm6
-; SSE41-NEXT: pxor %xmm7, %xmm6
-; SSE41-NEXT: movdqa %xmm10, %xmm0
+; SSE41-NEXT: pand %xmm0, %xmm8
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [9223372039002259455,9223372039002259455]
+; SSE41-NEXT: movdqa %xmm11, %xmm10
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
+; SSE41-NEXT: movdqa %xmm11, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,0,2,2]
+; SSE41-NEXT: pand %xmm10, %xmm0
; SSE41-NEXT: por %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm6, %xmm12
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm12
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [4294967295,4294967295]
+; SSE41-NEXT: movapd %xmm7, %xmm10
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm10
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm11, %xmm6
; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,2,2]
-; SSE41-NEXT: pand %xmm12, %xmm0
+; SSE41-NEXT: pand %xmm3, %xmm0
; SSE41-NEXT: por %xmm6, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm10, %xmm2
-; SSE41-NEXT: packusdw %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm5, %xmm3
-; SSE41-NEXT: pxor %xmm7, %xmm3
-; SSE41-NEXT: movdqa %xmm9, %xmm0
-; SSE41-NEXT: por %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT: movapd %xmm7, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm10[0,2]
+; SSE41-NEXT: pmaxud %xmm3, %xmm8
+; SSE41-NEXT: psubd %xmm3, %xmm8
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm11, %xmm3
; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm5
-; SSE41-NEXT: movdqa %xmm4, %xmm3
-; SSE41-NEXT: pxor %xmm7, %xmm3
-; SSE41-NEXT: por %xmm8, %xmm7
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: pcmpeqd %xmm7, %xmm6
-; SSE41-NEXT: pcmpgtd %xmm7, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
-; SSE41-NEXT: pand %xmm6, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm4
-; SSE41-NEXT: packusdw %xmm5, %xmm4
-; SSE41-NEXT: psubd %xmm2, %xmm11
-; SSE41-NEXT: psubd %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm11, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm2
+; SSE41-NEXT: pxor %xmm4, %xmm9
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm11
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,0,2,2]
+; SSE41-NEXT: pand %xmm3, %xmm0
+; SSE41-NEXT: por %xmm11, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm7
+; SSE41-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm2[0,2]
+; SSE41-NEXT: pmaxud %xmm7, %xmm1
+; SSE41-NEXT: psubd %xmm7, %xmm1
+; SSE41-NEXT: movdqa %xmm8, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: test34:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vunpckhps {{.*#+}} xmm9 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vunpckhps {{.*#+}} xmm8 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm12 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm5[0],zero,xmm5[1],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm10, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm7, %xmm4, %xmm11
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
-; AVX1-NEXT: vpxor %xmm3, %xmm7, %xmm4
-; AVX1-NEXT: vorps %xmm3, %xmm8, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm13
-; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm6
-; AVX1-NEXT: vpor %xmm3, %xmm12, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm14
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpxor %xmm3, %xmm6, %xmm4
-; AVX1-NEXT: vorps %xmm3, %xmm9, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vblendvpd %xmm3, %xmm9, %xmm6, %xmm3
-; AVX1-NEXT: vblendvpd %xmm14, %xmm12, %xmm1, %xmm1
-; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vblendvpd %xmm13, %xmm8, %xmm7, %xmm3
-; AVX1-NEXT: vblendvpd %xmm11, %xmm10, %xmm2, %xmm2
-; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vmovapd {{.*#+}} xmm7 = [4294967295,4294967295]
+; AVX1-NEXT: vblendvpd %xmm5, %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vblendvpd %xmm5, %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vblendvpd %xmm5, %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vblendvpd %xmm4, %xmm1, %xmm7, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: test34:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
-; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %ymm5, %ymm2, %ymm6
-; AVX2-NEXT: vpor %ymm5, %ymm4, %ymm7
-; AVX2-NEXT: vpcmpgtq %ymm7, %ymm6, %ymm6
-; AVX2-NEXT: vblendvpd %ymm6, %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpxor %ymm5, %ymm1, %ymm4
-; AVX2-NEXT: vpor %ymm5, %ymm3, %ymm5
-; AVX2-NEXT: vpcmpgtq %ymm5, %ymm4, %ymm4
-; AVX2-NEXT: vblendvpd %ymm4, %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; AVX2-SLOW-LABEL: test34:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastd {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
+; AVX2-SLOW-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm2, %ymm4
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
+; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm6 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-SLOW-NEXT: vblendvpd %ymm4, %ymm2, %ymm6, %ymm2
+; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm1, %ymm3
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm6, %ymm1
+; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm3[0,2],ymm1[4,6],ymm3[4,6]
+; AVX2-SLOW-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: test34:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastd {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1]
+; AVX2-FAST-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-FAST-NEXT: vpxor %ymm3, %ymm1, %ymm4
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
+; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm6 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-FAST-NEXT: vblendvpd %ymm4, %ymm1, %ymm6, %ymm1
+; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vpxor %ymm3, %ymm2, %ymm3
+; AVX2-FAST-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm2, %ymm6, %ymm2
+; AVX2-FAST-NEXT: vpermps %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: test34:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: vpminuq %zmm1, %zmm2, %zmm1
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vpmovusqd %zmm1, %ymm1
+; AVX512-NEXT: vpmaxud %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%mask = and <8 x i32> %a0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>