[llvm] r352409 - [CodeGen][X86] Expand UADDSAT to NOT+UMIN+ADD
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 28 11:19:09 PST 2019
Author: nikic
Date: Mon Jan 28 11:19:09 2019
New Revision: 352409
URL: http://llvm.org/viewvc/llvm-project?rev=352409&view=rev
Log:
[CodeGen][X86] Expand UADDSAT to NOT+UMIN+ADD
Follow-up to D56636, this time handling the UADDSAT case by expanding
uadd.sat(a, b) to umin(a, ~b) + b. The identity holds because a <= ~b
exactly when a + b does not wrap; otherwise umin clamps a to ~b, and
~b + b is the all-ones saturation value.
Differential Revision: https://reviews.llvm.org/D56869
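For illustration only (not part of the commit), a minimal scalar C++ model
of the identity the expansion relies on; the helper name uadd_sat is
hypothetical:

    #include <cassert>
    #include <cstdint>

    // Scalar sketch of the expansion: uadd.sat(a, b) == umin(a, ~b) + b.
    static uint32_t uadd_sat(uint32_t a, uint32_t b) {
      uint32_t NotB = ~b;                  // NOT
      uint32_t Min = a < NotB ? a : NotB;  // UMIN
      return Min + b;                      // ADD; cannot wrap past UINT32_MAX
    }

    int main() {
      assert(uadd_sat(10, 20) == 30);                      // no saturation
      assert(uadd_sat(0xFFFFFFF0u, 0x20u) == 0xFFFFFFFFu); // saturates
    }

This mirrors the three-node NOT+UMIN+ADD sequence emitted below, which the
X86 cost tables price at 3 (e.g. not + pminud + paddd).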
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
llvm/trunk/test/Analysis/CostModel/X86/arith-usat.ll
llvm/trunk/test/CodeGen/X86/uadd_sat.ll
llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=352409&r1=352408&r2=352409&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp Mon Jan 28 11:19:09 2019
@@ -5287,6 +5287,12 @@ SDValue TargetLowering::expandAddSubSat(
return DAG.getNode(ISD::SUB, dl, VT, Max, RHS);
}
+ if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) {
+ SDValue InvRHS = DAG.getNOT(dl, RHS, VT);
+ SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS);
+ return DAG.getNode(ISD::ADD, dl, VT, Min, RHS);
+ }
+
if (VT.isVector()) {
// TODO: Consider not scalarizing here.
return SDValue();
Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=352409&r1=352408&r2=352409&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Mon Jan 28 11:19:09 2019
@@ -1876,6 +1876,10 @@ int X86TTIImpl::getIntrinsicInstrCost(In
{ ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
{ ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
{ ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
+ { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
+ { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
+ { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
+ { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
};
static const CostTblEntry XOPCostTbl[] = {
{ ISD::BITREVERSE, MVT::v4i64, 4 },
@@ -1917,6 +1921,7 @@ int X86TTIImpl::getIntrinsicInstrCost(In
{ ISD::SSUBSAT, MVT::v32i8, 1 },
{ ISD::UADDSAT, MVT::v16i16, 1 },
{ ISD::UADDSAT, MVT::v32i8, 1 },
+ { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
{ ISD::USUBSAT, MVT::v16i16, 1 },
{ ISD::USUBSAT, MVT::v32i8, 1 },
{ ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
@@ -1953,6 +1958,7 @@ int X86TTIImpl::getIntrinsicInstrCost(In
{ ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
{ ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
{ ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
+ { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
{ ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
{ ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
{ ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
@@ -1977,6 +1983,7 @@ int X86TTIImpl::getIntrinsicInstrCost(In
};
static const CostTblEntry SSE42CostTbl[] = {
{ ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
+ { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
{ ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
{ ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
};
Modified: llvm/trunk/test/Analysis/CostModel/X86/arith-usat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/arith-usat.ll?rev=352409&r1=352408&r2=352409&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/arith-usat.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/arith-usat.ll Mon Jan 28 11:19:09 2019
@@ -80,8 +80,8 @@ define i32 @add(i32 %arg) {
; AVX1-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; AVX1-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; AVX1-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; AVX1-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; AVX1-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; AVX1-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
@@ -213,8 +213,8 @@ define i32 @add(i32 %arg) {
; BTVER2-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V8I64 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I32 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4I32 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
-; BTVER2-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
-; BTVER2-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
+; BTVER2-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8I32 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
+; BTVER2-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V16I32 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %I16 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8I16 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; BTVER2-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16I16 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
Modified: llvm/trunk/test/CodeGen/X86/uadd_sat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uadd_sat.ll?rev=352409&r1=352408&r2=352409&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uadd_sat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uadd_sat.ll Mon Jan 28 11:19:09 2019
@@ -111,37 +111,18 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x
;
; X64-LABEL: vec:
; X64: # %bb.0:
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; X64-NEXT: movd %xmm2, %eax
-; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; X64-NEXT: movd %xmm2, %ecx
-; X64-NEXT: addl %eax, %ecx
-; X64-NEXT: movl $-1, %eax
-; X64-NEXT: cmovbl %eax, %ecx
-; X64-NEXT: movd %ecx, %xmm2
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; X64-NEXT: movd %xmm3, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; X64-NEXT: movd %xmm3, %edx
-; X64-NEXT: addl %ecx, %edx
-; X64-NEXT: cmovbl %eax, %edx
-; X64-NEXT: movd %edx, %xmm3
-; X64-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X64-NEXT: movd %xmm1, %ecx
-; X64-NEXT: movd %xmm0, %edx
-; X64-NEXT: addl %ecx, %edx
-; X64-NEXT: cmovbl %eax, %edx
-; X64-NEXT: movd %edx, %xmm2
-; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; X64-NEXT: movd %xmm1, %ecx
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X64-NEXT: movd %xmm0, %edx
-; X64-NEXT: addl %ecx, %edx
-; X64-NEXT: cmovbl %eax, %edx
-; X64-NEXT: movd %edx, %xmm0
-; X64-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X64-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; X64-NEXT: movdqa %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; X64-NEXT: pxor %xmm0, %xmm2
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647,2147483647,2147483647]
+; X64-NEXT: pxor %xmm1, %xmm3
+; X64-NEXT: pcmpgtd %xmm2, %xmm3
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: pcmpeqd %xmm2, %xmm2
+; X64-NEXT: pxor %xmm3, %xmm2
+; X64-NEXT: movdqa %xmm1, %xmm3
+; X64-NEXT: pandn %xmm2, %xmm3
+; X64-NEXT: por %xmm3, %xmm0
+; X64-NEXT: paddd %xmm1, %xmm0
; X64-NEXT: retq
%tmp = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
ret <4 x i32> %tmp;
Modified: llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll?rev=352409&r1=352408&r2=352409&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uadd_sat_vec.ll Mon Jan 28 11:19:09 2019
@@ -635,85 +635,118 @@ define <16 x i1> @v16i1(<16 x i1> %x, <1
define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) nounwind {
; SSE2-LABEL: v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: psllq $32, %xmm1
-; SSE2-NEXT: movq %xmm1, %rax
; SSE2-NEXT: psllq $32, %xmm0
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: addq %rax, %rcx
-; SSE2-NEXT: movq $-1, %rax
-; SSE2-NEXT: cmovbq %rax, %rcx
-; SSE2-NEXT: movq %rcx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm1, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm0
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE2-NEXT: psrlq $32, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: psllq $32, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159]
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: psrlq $32, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v2i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: psllq $32, %xmm1
-; SSSE3-NEXT: movq %xmm1, %rax
; SSSE3-NEXT: psllq $32, %xmm0
-; SSSE3-NEXT: movq %xmm0, %rcx
-; SSSE3-NEXT: addq %rax, %rcx
-; SSSE3-NEXT: movq $-1, %rax
-; SSSE3-NEXT: cmovbq %rax, %rcx
-; SSSE3-NEXT: movq %rcx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSSE3-NEXT: movq %xmm1, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSSE3-NEXT: movq %xmm0, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm0
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSSE3-NEXT: psrlq $32, %xmm2
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: pxor %xmm0, %xmm2
+; SSSE3-NEXT: psllq $32, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159]
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSSE3-NEXT: pxor %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pandn %xmm2, %xmm3
+; SSSE3-NEXT: por %xmm3, %xmm0
+; SSSE3-NEXT: paddq %xmm1, %xmm0
+; SSSE3-NEXT: psrlq $32, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v2i32:
; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psllq $32, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: pxor %xmm2, %xmm0
; SSE41-NEXT: psllq $32, %xmm1
-; SSE41-NEXT: pextrq $1, %xmm1, %rax
-; SSE41-NEXT: psllq $32, %xmm0
-; SSE41-NEXT: pextrq $1, %xmm0, %rcx
-; SSE41-NEXT: addq %rax, %rcx
-; SSE41-NEXT: movq $-1, %rax
-; SSE41-NEXT: cmovbq %rax, %rcx
-; SSE41-NEXT: movq %rcx, %xmm2
-; SSE41-NEXT: movq %xmm1, %rcx
-; SSE41-NEXT: movq %xmm0, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm0
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE41-NEXT: psrlq $32, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159]
+; SSE41-NEXT: pxor %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm0
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT: paddq %xmm1, %xmm3
+; SSE41-NEXT: psrlq $32, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: v2i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllq $32, %xmm1, %xmm1
-; AVX-NEXT: vpextrq $1, %xmm1, %rax
-; AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; AVX-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX-NEXT: addq %rax, %rcx
-; AVX-NEXT: movq $-1, %rax
-; AVX-NEXT: cmovbq %rax, %rcx
-; AVX-NEXT: vmovq %rcx, %xmm2
-; AVX-NEXT: vmovq %xmm1, %rcx
-; AVX-NEXT: vmovq %xmm0, %rdx
-; AVX-NEXT: addq %rcx, %rdx
-; AVX-NEXT: cmovbq %rax, %rdx
-; AVX-NEXT: vmovq %rdx, %xmm0
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: v2i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
+; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v2i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4
+; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v2i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX512-NEXT: vmovdqa %xmm1, %xmm2
+; AVX512-NEXT: vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT: vpminuq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX512-NEXT: retq
%z = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %x, <2 x i32> %y)
ret <2 x i32> %z
}
@@ -721,124 +754,67 @@ define <2 x i32> @v2i32(<2 x i32> %x, <2
define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; SSE2-LABEL: v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: addl %eax, %ecx
-; SSE2-NEXT: movl $-1, %eax
-; SSE2-NEXT: cmovbl %eax, %ecx
-; SSE2-NEXT: movd %ecx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSE2-NEXT: movd %xmm3, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm3, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-NEXT: movd %xmm1, %ecx
-; SSE2-NEXT: movd %xmm0, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE2-NEXT: movd %xmm1, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: movd %xmm0, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v4i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %eax
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: addl %eax, %ecx
-; SSSE3-NEXT: movl $-1, %eax
-; SSSE3-NEXT: cmovbl %eax, %ecx
-; SSSE3-NEXT: movd %ecx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; SSSE3-NEXT: movd %xmm3, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSSE3-NEXT: movd %xmm3, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm3
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSSE3-NEXT: movd %xmm1, %ecx
-; SSSE3-NEXT: movd %xmm0, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSSE3-NEXT: movd %xmm1, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSSE3-NEXT: movd %xmm0, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: pxor %xmm0, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483647,2147483647,2147483647,2147483647]
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSSE3-NEXT: pxor %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pandn %xmm2, %xmm3
+; SSSE3-NEXT: por %xmm3, %xmm0
+; SSSE3-NEXT: paddd %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pextrd $1, %xmm1, %eax
-; SSE41-NEXT: pextrd $1, %xmm0, %ecx
-; SSE41-NEXT: addl %eax, %ecx
-; SSE41-NEXT: movl $-1, %eax
-; SSE41-NEXT: cmovbl %eax, %ecx
-; SSE41-NEXT: movd %xmm1, %edx
-; SSE41-NEXT: movd %xmm0, %esi
-; SSE41-NEXT: addl %edx, %esi
-; SSE41-NEXT: cmovbl %eax, %esi
-; SSE41-NEXT: movd %esi, %xmm2
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm2
-; SSE41-NEXT: pextrd $2, %xmm1, %ecx
-; SSE41-NEXT: pextrd $2, %xmm0, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $2, %edx, %xmm2
-; SSE41-NEXT: pextrd $3, %xmm1, %ecx
-; SSE41-NEXT: pextrd $3, %xmm0, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $3, %edx, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: pminud %xmm2, %xmm0
+; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpextrd $1, %xmm1, %eax
-; AVX-NEXT: vpextrd $1, %xmm0, %ecx
-; AVX-NEXT: addl %eax, %ecx
-; AVX-NEXT: movl $-1, %eax
-; AVX-NEXT: cmovbl %eax, %ecx
-; AVX-NEXT: vmovd %xmm1, %edx
-; AVX-NEXT: vmovd %xmm0, %esi
-; AVX-NEXT: addl %edx, %esi
-; AVX-NEXT: cmovbl %eax, %esi
-; AVX-NEXT: vmovd %esi, %xmm2
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX-NEXT: vpextrd $2, %xmm0, %edx
-; AVX-NEXT: addl %ecx, %edx
-; AVX-NEXT: cmovbl %eax, %edx
-; AVX-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
-; AVX-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX-NEXT: vpextrd $3, %xmm0, %edx
-; AVX-NEXT: addl %ecx, %edx
-; AVX-NEXT: cmovbl %eax, %edx
-; AVX-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
+; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v4i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovdqa %xmm1, %xmm2
+; AVX512-NEXT: vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
%z = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
ret <4 x i32> %z
}
@@ -846,323 +822,99 @@ define <4 x i32> @v4i32(<4 x i32> %x, <4
define <8 x i32> @v8i32(<8 x i32> %x, <8 x i32> %y) nounwind {
; SSE2-LABEL: v8i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: addl %eax, %ecx
-; SSE2-NEXT: movl $-1, %eax
-; SSE2-NEXT: cmovbl %eax, %ecx
-; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; SSE2-NEXT: movd %xmm5, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1]
-; SSE2-NEXT: movd %xmm5, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: movd %xmm4, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,2,3]
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1]
-; SSE2-NEXT: movd %xmm4, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE2-NEXT: movd %xmm4, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE2-NEXT: movd %xmm3, %ecx
-; SSE2-NEXT: movd %xmm1, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; SSE2-NEXT: movd %xmm3, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE2-NEXT: movd %xmm1, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: pxor %xmm6, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE2-NEXT: pxor %xmm8, %xmm7
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: pandn %xmm7, %xmm5
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm6, %xmm2
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
-; SSSE3-NEXT: movd %xmm0, %eax
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3]
-; SSSE3-NEXT: movd %xmm0, %ecx
-; SSSE3-NEXT: addl %eax, %ecx
-; SSSE3-NEXT: movl $-1, %eax
-; SSSE3-NEXT: cmovbl %eax, %ecx
-; SSSE3-NEXT: movd %ecx, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movd %xmm5, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1]
-; SSSE3-NEXT: movd %xmm5, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm5
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: movd %xmm4, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1]
-; SSSE3-NEXT: movd %xmm4, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSSE3-NEXT: movd %xmm4, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm4
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSSE3-NEXT: movd %xmm3, %ecx
-; SSSE3-NEXT: movd %xmm1, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; SSSE3-NEXT: movd %xmm3, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSSE3-NEXT: movd %xmm1, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm1
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm5
+; SSSE3-NEXT: pxor %xmm4, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647,2147483647,2147483647]
+; SSSE3-NEXT: movdqa %xmm2, %xmm7
+; SSSE3-NEXT: pxor %xmm6, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm8
+; SSSE3-NEXT: pxor %xmm8, %xmm7
+; SSSE3-NEXT: movdqa %xmm2, %xmm5
+; SSSE3-NEXT: pandn %xmm7, %xmm5
+; SSSE3-NEXT: por %xmm5, %xmm0
+; SSSE3-NEXT: paddd %xmm2, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm6
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm6, %xmm2
+; SSSE3-NEXT: por %xmm2, %xmm1
+; SSSE3-NEXT: paddd %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v8i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: pextrd $1, %xmm2, %eax
-; SSE41-NEXT: pextrd $1, %xmm0, %ecx
-; SSE41-NEXT: addl %eax, %ecx
-; SSE41-NEXT: movl $-1, %eax
-; SSE41-NEXT: cmovbl %eax, %ecx
-; SSE41-NEXT: movd %xmm2, %edx
-; SSE41-NEXT: movd %xmm0, %esi
-; SSE41-NEXT: addl %edx, %esi
-; SSE41-NEXT: cmovbl %eax, %esi
-; SSE41-NEXT: movd %esi, %xmm0
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm0
-; SSE41-NEXT: pextrd $2, %xmm2, %ecx
-; SSE41-NEXT: pextrd $2, %xmm4, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $2, %edx, %xmm0
-; SSE41-NEXT: pextrd $3, %xmm2, %ecx
-; SSE41-NEXT: pextrd $3, %xmm4, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $3, %edx, %xmm0
-; SSE41-NEXT: pextrd $1, %xmm3, %ecx
-; SSE41-NEXT: pextrd $1, %xmm1, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: movd %xmm3, %ecx
-; SSE41-NEXT: movd %xmm1, %esi
-; SSE41-NEXT: addl %ecx, %esi
-; SSE41-NEXT: cmovbl %eax, %esi
-; SSE41-NEXT: movd %esi, %xmm2
-; SSE41-NEXT: pinsrd $1, %edx, %xmm2
-; SSE41-NEXT: pextrd $2, %xmm3, %ecx
-; SSE41-NEXT: pextrd $2, %xmm1, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $2, %edx, %xmm2
-; SSE41-NEXT: pextrd $3, %xmm3, %ecx
-; SSE41-NEXT: pextrd $3, %xmm1, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $3, %edx, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE41-NEXT: movdqa %xmm2, %xmm5
+; SSE41-NEXT: pxor %xmm4, %xmm5
+; SSE41-NEXT: pminud %xmm5, %xmm0
+; SSE41-NEXT: paddd %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm4
+; SSE41-NEXT: pminud %xmm4, %xmm1
+; SSE41-NEXT: paddd %xmm3, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm2, %eax
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl $-1, %eax
-; AVX1-NEXT: cmovbl %eax, %ecx
-; AVX1-NEXT: vmovd %xmm2, %edx
-; AVX1-NEXT: vmovd %xmm3, %esi
-; AVX1-NEXT: addl %edx, %esi
-; AVX1-NEXT: cmovbl %eax, %esi
-; AVX1-NEXT: vmovd %esi, %xmm4
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX1-NEXT: vpextrd $2, %xmm3, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm3, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX1-NEXT: vpextrd $1, %xmm0, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: vmovd %xmm0, %esi
-; AVX1-NEXT: addl %ecx, %esi
-; AVX1-NEXT: cmovbl %eax, %esi
-; AVX1-NEXT: vmovd %esi, %xmm3
-; AVX1-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3
-; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: vpextrd $2, %xmm0, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3
-; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm0, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vcmptrueps %ymm2, %ymm2, %ymm2
+; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpminud %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpextrd $1, %xmm2, %eax
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX2-NEXT: addl %eax, %ecx
-; AVX2-NEXT: movl $-1, %eax
-; AVX2-NEXT: cmovbl %eax, %ecx
-; AVX2-NEXT: vmovd %xmm2, %edx
-; AVX2-NEXT: vmovd %xmm3, %esi
-; AVX2-NEXT: addl %edx, %esi
-; AVX2-NEXT: cmovbl %eax, %esi
-; AVX2-NEXT: vmovd %esi, %xmm4
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4
-; AVX2-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm3, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX2-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm3, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
-; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $1, %xmm0, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vmovd %xmm1, %ecx
-; AVX2-NEXT: vmovd %xmm0, %esi
-; AVX2-NEXT: addl %ecx, %esi
-; AVX2-NEXT: cmovbl %eax, %esi
-; AVX2-NEXT: vmovd %esi, %xmm3
-; AVX2-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3
-; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm0, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3
-; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm0, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512-NEXT: vpextrd $1, %xmm2, %eax
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX512-NEXT: addl %eax, %ecx
-; AVX512-NEXT: movl $-1, %eax
-; AVX512-NEXT: cmovbl %eax, %ecx
-; AVX512-NEXT: vmovd %xmm2, %edx
-; AVX512-NEXT: vmovd %xmm3, %esi
-; AVX512-NEXT: addl %edx, %esi
-; AVX512-NEXT: cmovbl %eax, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4
-; AVX512-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX512-NEXT: vpextrd $2, %xmm3, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX512-NEXT: vpextrd $3, %xmm3, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
-; AVX512-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX512-NEXT: vpextrd $1, %xmm0, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vmovd %xmm1, %ecx
-; AVX512-NEXT: vmovd %xmm0, %esi
-; AVX512-NEXT: addl %ecx, %esi
-; AVX512-NEXT: cmovbl %eax, %esi
-; AVX512-NEXT: vmovd %esi, %xmm3
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm3, %xmm3
-; AVX512-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX512-NEXT: vpextrd $2, %xmm0, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm3, %xmm3
-; AVX512-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX512-NEXT: vpextrd $3, %xmm0, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm3, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa %ymm1, %ymm2
+; AVX512-NEXT: vpternlogq $15, %ymm1, %ymm1, %ymm2
+; AVX512-NEXT: vpminud %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%z = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %x, <8 x i32> %y)
ret <8 x i32> %z
@@ -1171,617 +923,163 @@ define <8 x i32> @v8i32(<8 x i32> %x, <8
define <16 x i32> @v16i32(<16 x i32> %x, <16 x i32> %y) nounwind {
; SSE2-LABEL: v16i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: addl %eax, %ecx
-; SSE2-NEXT: movl $-1, %eax
-; SSE2-NEXT: cmovbl %eax, %ecx
-; SSE2-NEXT: movd %ecx, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm10
-; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; SSE2-NEXT: movd %xmm4, %ecx
-; SSE2-NEXT: movd %xmm1, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
-; SSE2-NEXT: movd %xmm4, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSE2-NEXT: movd %xmm1, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,1,2,3]
-; SSE2-NEXT: movd %xmm1, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,1,2,3]
-; SSE2-NEXT: movd %xmm1, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
-; SSE2-NEXT: movd %xmm4, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1]
-; SSE2-NEXT: movd %xmm4, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE2-NEXT: movd %xmm5, %ecx
-; SSE2-NEXT: movd %xmm8, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; SSE2-NEXT: movd %xmm5, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,2,3]
-; SSE2-NEXT: movd %xmm5, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3]
-; SSE2-NEXT: movd %xmm4, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,1,2,3]
-; SSE2-NEXT: movd %xmm4, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1]
-; SSE2-NEXT: movd %xmm5, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; SSE2-NEXT: movd %xmm5, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; SSE2-NEXT: movd %xmm6, %ecx
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
-; SSE2-NEXT: movd %xmm6, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,3,0,1]
-; SSE2-NEXT: movd %xmm5, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
-; SSE2-NEXT: movd %xmm5, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm6
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
-; SSE2-NEXT: movd %xmm7, %ecx
-; SSE2-NEXT: movd %xmm3, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,2,3]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,2,3]
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: addl %ecx, %edx
-; SSE2-NEXT: cmovbl %eax, %edx
-; SSE2-NEXT: movd %edx, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm5, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm10
+; SSE2-NEXT: pxor %xmm9, %xmm10
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647,2147483647,2147483647]
+; SSE2-NEXT: movdqa %xmm4, %xmm11
+; SSE2-NEXT: pxor %xmm8, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm11
+; SSE2-NEXT: pand %xmm11, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm10
+; SSE2-NEXT: pxor %xmm10, %xmm11
+; SSE2-NEXT: movdqa %xmm4, %xmm12
+; SSE2-NEXT: pandn %xmm11, %xmm12
+; SSE2-NEXT: por %xmm12, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm11
+; SSE2-NEXT: pxor %xmm9, %xmm11
+; SSE2-NEXT: movdqa %xmm5, %xmm12
+; SSE2-NEXT: pxor %xmm8, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm12
+; SSE2-NEXT: pand %xmm12, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm12
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pandn %xmm12, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: paddd %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm9, %xmm4
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: pxor %xmm8, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: pandn %xmm5, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm9
+; SSE2-NEXT: pxor %xmm7, %xmm8
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm8
+; SSE2-NEXT: pand %xmm8, %xmm3
+; SSE2-NEXT: pxor %xmm10, %xmm8
+; SSE2-NEXT: movdqa %xmm7, %xmm4
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v16i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm1, %xmm8
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[3,1,2,3]
-; SSSE3-NEXT: movd %xmm0, %eax
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
-; SSSE3-NEXT: movd %xmm0, %ecx
-; SSSE3-NEXT: addl %eax, %ecx
-; SSSE3-NEXT: movl $-1, %eax
-; SSSE3-NEXT: cmovbl %eax, %ecx
-; SSSE3-NEXT: movd %ecx, %xmm9
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSSE3-NEXT: movd %xmm0, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSSE3-NEXT: movd %xmm0, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm10
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; SSSE3-NEXT: movd %xmm4, %ecx
-; SSSE3-NEXT: movd %xmm1, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
-; SSSE3-NEXT: movd %xmm4, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; SSSE3-NEXT: movd %xmm1, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm1
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,1,2,3]
-; SSSE3-NEXT: movd %xmm1, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm8[3,1,2,3]
-; SSSE3-NEXT: movd %xmm1, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
-; SSSE3-NEXT: movd %xmm4, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1]
-; SSSE3-NEXT: movd %xmm4, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm4
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSSE3-NEXT: movd %xmm5, %ecx
-; SSSE3-NEXT: movd %xmm8, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; SSSE3-NEXT: movd %xmm5, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,2,3]
-; SSSE3-NEXT: movd %xmm5, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm5
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm6[3,1,2,3]
-; SSSE3-NEXT: movd %xmm4, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,1,2,3]
-; SSSE3-NEXT: movd %xmm4, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1]
-; SSSE3-NEXT: movd %xmm5, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movd %xmm5, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm5
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; SSSE3-NEXT: movd %xmm6, %ecx
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
-; SSSE3-NEXT: movd %xmm6, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[3,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,3,0,1]
-; SSSE3-NEXT: movd %xmm5, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
-; SSSE3-NEXT: movd %xmm5, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm6
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
-; SSSE3-NEXT: movd %xmm7, %ecx
-; SSSE3-NEXT: movd %xmm3, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm5
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[1,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,2,3]
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: addl %ecx, %edx
-; SSSE3-NEXT: cmovbl %eax, %edx
-; SSSE3-NEXT: movd %edx, %xmm2
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
-; SSSE3-NEXT: movdqa %xmm4, %xmm2
-; SSSE3-NEXT: movdqa %xmm5, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm10
+; SSSE3-NEXT: pxor %xmm9, %xmm10
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647,2147483647,2147483647]
+; SSSE3-NEXT: movdqa %xmm4, %xmm11
+; SSSE3-NEXT: pxor %xmm8, %xmm11
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm11
+; SSSE3-NEXT: pand %xmm11, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm10
+; SSSE3-NEXT: pxor %xmm10, %xmm11
+; SSSE3-NEXT: movdqa %xmm4, %xmm12
+; SSSE3-NEXT: pandn %xmm11, %xmm12
+; SSSE3-NEXT: por %xmm12, %xmm0
+; SSSE3-NEXT: paddd %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm1, %xmm11
+; SSSE3-NEXT: pxor %xmm9, %xmm11
+; SSSE3-NEXT: movdqa %xmm5, %xmm12
+; SSSE3-NEXT: pxor %xmm8, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm12
+; SSSE3-NEXT: pand %xmm12, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm12
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pandn %xmm12, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: paddd %xmm5, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pxor %xmm9, %xmm4
+; SSSE3-NEXT: movdqa %xmm6, %xmm5
+; SSSE3-NEXT: pxor %xmm8, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pxor %xmm10, %xmm5
+; SSSE3-NEXT: movdqa %xmm6, %xmm4
+; SSSE3-NEXT: pandn %xmm5, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm2
+; SSSE3-NEXT: paddd %xmm6, %xmm2
+; SSSE3-NEXT: pxor %xmm3, %xmm9
+; SSSE3-NEXT: pxor %xmm7, %xmm8
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm8
+; SSSE3-NEXT: pand %xmm8, %xmm3
+; SSSE3-NEXT: pxor %xmm10, %xmm8
+; SSSE3-NEXT: movdqa %xmm7, %xmm4
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm3
+; SSSE3-NEXT: paddd %xmm7, %xmm3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v16i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm8
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pextrd $1, %xmm4, %eax
-; SSE41-NEXT: pextrd $1, %xmm0, %ecx
-; SSE41-NEXT: addl %eax, %ecx
-; SSE41-NEXT: movl $-1, %eax
-; SSE41-NEXT: cmovbl %eax, %ecx
-; SSE41-NEXT: movd %xmm4, %edx
-; SSE41-NEXT: movd %xmm0, %esi
-; SSE41-NEXT: addl %edx, %esi
-; SSE41-NEXT: cmovbl %eax, %esi
-; SSE41-NEXT: movd %esi, %xmm0
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm0
-; SSE41-NEXT: pextrd $2, %xmm4, %ecx
-; SSE41-NEXT: pextrd $2, %xmm1, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $2, %edx, %xmm0
-; SSE41-NEXT: pextrd $3, %xmm4, %ecx
-; SSE41-NEXT: pextrd $3, %xmm1, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $3, %edx, %xmm0
-; SSE41-NEXT: pextrd $1, %xmm5, %ecx
-; SSE41-NEXT: pextrd $1, %xmm8, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: movd %xmm5, %ecx
-; SSE41-NEXT: movd %xmm8, %esi
-; SSE41-NEXT: addl %ecx, %esi
-; SSE41-NEXT: cmovbl %eax, %esi
-; SSE41-NEXT: movd %esi, %xmm1
-; SSE41-NEXT: pinsrd $1, %edx, %xmm1
-; SSE41-NEXT: pextrd $2, %xmm5, %ecx
-; SSE41-NEXT: pextrd $2, %xmm8, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $2, %edx, %xmm1
-; SSE41-NEXT: pextrd $3, %xmm5, %ecx
-; SSE41-NEXT: pextrd $3, %xmm8, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $3, %edx, %xmm1
-; SSE41-NEXT: pextrd $1, %xmm6, %ecx
-; SSE41-NEXT: pextrd $1, %xmm2, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: movd %xmm6, %ecx
-; SSE41-NEXT: movd %xmm2, %esi
-; SSE41-NEXT: addl %ecx, %esi
-; SSE41-NEXT: cmovbl %eax, %esi
-; SSE41-NEXT: movd %esi, %xmm4
-; SSE41-NEXT: pinsrd $1, %edx, %xmm4
-; SSE41-NEXT: pextrd $2, %xmm6, %ecx
-; SSE41-NEXT: pextrd $2, %xmm2, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $2, %edx, %xmm4
-; SSE41-NEXT: pextrd $3, %xmm6, %ecx
-; SSE41-NEXT: pextrd $3, %xmm2, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $3, %edx, %xmm4
-; SSE41-NEXT: pextrd $1, %xmm7, %ecx
-; SSE41-NEXT: pextrd $1, %xmm3, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: movd %xmm7, %ecx
-; SSE41-NEXT: movd %xmm3, %esi
-; SSE41-NEXT: addl %ecx, %esi
-; SSE41-NEXT: cmovbl %eax, %esi
-; SSE41-NEXT: movd %esi, %xmm5
-; SSE41-NEXT: pinsrd $1, %edx, %xmm5
-; SSE41-NEXT: pextrd $2, %xmm7, %ecx
-; SSE41-NEXT: pextrd $2, %xmm3, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $2, %edx, %xmm5
-; SSE41-NEXT: pextrd $3, %xmm7, %ecx
-; SSE41-NEXT: pextrd $3, %xmm3, %edx
-; SSE41-NEXT: addl %ecx, %edx
-; SSE41-NEXT: cmovbl %eax, %edx
-; SSE41-NEXT: pinsrd $3, %edx, %xmm5
-; SSE41-NEXT: movdqa %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm5, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm8
+; SSE41-NEXT: movdqa %xmm4, %xmm9
+; SSE41-NEXT: pxor %xmm8, %xmm9
+; SSE41-NEXT: pminud %xmm9, %xmm0
+; SSE41-NEXT: paddd %xmm4, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: pxor %xmm8, %xmm4
+; SSE41-NEXT: pminud %xmm4, %xmm1
+; SSE41-NEXT: paddd %xmm5, %xmm1
+; SSE41-NEXT: movdqa %xmm6, %xmm4
+; SSE41-NEXT: pxor %xmm8, %xmm4
+; SSE41-NEXT: pminud %xmm4, %xmm2
+; SSE41-NEXT: paddd %xmm6, %xmm2
+; SSE41-NEXT: pxor %xmm7, %xmm8
+; SSE41-NEXT: pminud %xmm8, %xmm3
+; SSE41-NEXT: paddd %xmm7, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: v16i32:
; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vcmptrueps %ymm4, %ymm4, %ymm4
+; AVX1-NEXT: vxorps %ymm4, %ymm2, %ymm5
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT: vpminud %xmm6, %xmm7, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpminud %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm4, %ymm3, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vpextrd $1, %xmm4, %eax
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpextrd $1, %xmm5, %ecx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl $-1, %eax
-; AVX1-NEXT: cmovbl %eax, %ecx
-; AVX1-NEXT: vmovd %xmm4, %edx
-; AVX1-NEXT: vmovd %xmm5, %esi
-; AVX1-NEXT: addl %edx, %esi
-; AVX1-NEXT: cmovbl %eax, %esi
-; AVX1-NEXT: vmovd %esi, %xmm6
-; AVX1-NEXT: vpinsrd $1, %ecx, %xmm6, %xmm6
-; AVX1-NEXT: vpextrd $2, %xmm4, %ecx
-; AVX1-NEXT: vpextrd $2, %xmm5, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
-; AVX1-NEXT: vpextrd $3, %xmm4, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm5, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
-; AVX1-NEXT: vpextrd $1, %xmm2, %ecx
-; AVX1-NEXT: vpextrd $1, %xmm0, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vmovd %xmm2, %ecx
-; AVX1-NEXT: vmovd %xmm0, %esi
-; AVX1-NEXT: addl %ecx, %esi
-; AVX1-NEXT: cmovbl %eax, %esi
-; AVX1-NEXT: vmovd %esi, %xmm5
-; AVX1-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX1-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX1-NEXT: vpextrd $2, %xmm0, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX1-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm0, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm2, %ecx
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpextrd $1, %xmm4, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vmovd %xmm2, %ecx
-; AVX1-NEXT: vmovd %xmm4, %esi
-; AVX1-NEXT: addl %ecx, %esi
-; AVX1-NEXT: cmovbl %eax, %esi
-; AVX1-NEXT: vmovd %esi, %xmm5
-; AVX1-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX1-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX1-NEXT: vpextrd $2, %xmm4, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX1-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm4, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2
-; AVX1-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX1-NEXT: vpextrd $1, %xmm1, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vmovd %xmm3, %ecx
-; AVX1-NEXT: vmovd %xmm1, %esi
-; AVX1-NEXT: addl %ecx, %esi
-; AVX1-NEXT: cmovbl %eax, %esi
-; AVX1-NEXT: vmovd %esi, %xmm4
-; AVX1-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX1-NEXT: vpextrd $2, %xmm1, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX1-NEXT: vpextrd $3, %xmm3, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm1, %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: cmovbl %eax, %edx
-; AVX1-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpminud %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpminud %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2-NEXT: vpextrd $1, %xmm4, %eax
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT: vpextrd $1, %xmm5, %ecx
-; AVX2-NEXT: addl %eax, %ecx
-; AVX2-NEXT: movl $-1, %eax
-; AVX2-NEXT: cmovbl %eax, %ecx
-; AVX2-NEXT: vmovd %xmm4, %edx
-; AVX2-NEXT: vmovd %xmm5, %esi
-; AVX2-NEXT: addl %edx, %esi
-; AVX2-NEXT: cmovbl %eax, %esi
-; AVX2-NEXT: vmovd %esi, %xmm6
-; AVX2-NEXT: vpinsrd $1, %ecx, %xmm6, %xmm6
-; AVX2-NEXT: vpextrd $2, %xmm4, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm5, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
-; AVX2-NEXT: vpextrd $3, %xmm4, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm5, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
-; AVX2-NEXT: vpextrd $1, %xmm2, %ecx
-; AVX2-NEXT: vpextrd $1, %xmm0, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vmovd %xmm2, %ecx
-; AVX2-NEXT: vmovd %xmm0, %esi
-; AVX2-NEXT: addl %ecx, %esi
-; AVX2-NEXT: cmovbl %eax, %esi
-; AVX2-NEXT: vmovd %esi, %xmm5
-; AVX2-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX2-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm0, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX2-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm0, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
-; AVX2-NEXT: vpextrd $1, %xmm2, %ecx
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vpextrd $1, %xmm4, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vmovd %xmm2, %ecx
-; AVX2-NEXT: vmovd %xmm4, %esi
-; AVX2-NEXT: addl %ecx, %esi
-; AVX2-NEXT: cmovbl %eax, %esi
-; AVX2-NEXT: vmovd %esi, %xmm5
-; AVX2-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX2-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm4, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX2-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm4, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2
-; AVX2-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX2-NEXT: vpextrd $1, %xmm1, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vmovd %xmm3, %ecx
-; AVX2-NEXT: vmovd %xmm1, %esi
-; AVX2-NEXT: addl %ecx, %esi
-; AVX2-NEXT: cmovbl %eax, %esi
-; AVX2-NEXT: vmovd %esi, %xmm4
-; AVX2-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
-; AVX2-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX2-NEXT: vpextrd $2, %xmm1, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX2-NEXT: vpextrd $3, %xmm3, %ecx
-; AVX2-NEXT: vpextrd $3, %xmm1, %edx
-; AVX2-NEXT: addl %ecx, %edx
-; AVX2-NEXT: cmovbl %eax, %edx
-; AVX2-NEXT: vpinsrd $3, %edx, %xmm4, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm5
+; AVX2-NEXT: vpminud %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm2
+; AVX2-NEXT: vpminud %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: v16i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm2
-; AVX512-NEXT: vpextrd $1, %xmm2, %eax
-; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm3
-; AVX512-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX512-NEXT: addl %eax, %ecx
-; AVX512-NEXT: movl $-1, %eax
-; AVX512-NEXT: cmovbl %eax, %ecx
-; AVX512-NEXT: vmovd %xmm2, %edx
-; AVX512-NEXT: vmovd %xmm3, %esi
-; AVX512-NEXT: addl %edx, %esi
-; AVX512-NEXT: cmovbl %eax, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %ecx, %xmm4, %xmm4
-; AVX512-NEXT: vpextrd $2, %xmm2, %ecx
-; AVX512-NEXT: vpextrd $2, %xmm3, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpextrd $3, %xmm2, %ecx
-; AVX512-NEXT: vpextrd $3, %xmm3, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
-; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3
-; AVX512-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm4
-; AVX512-NEXT: vpextrd $1, %xmm4, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vmovd %xmm3, %ecx
-; AVX512-NEXT: vmovd %xmm4, %esi
-; AVX512-NEXT: addl %ecx, %esi
-; AVX512-NEXT: cmovbl %eax, %esi
-; AVX512-NEXT: vmovd %esi, %xmm5
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX512-NEXT: vpextrd $2, %xmm4, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $3, %xmm3, %ecx
-; AVX512-NEXT: vpextrd $3, %xmm4, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-NEXT: vpextrd $1, %xmm3, %ecx
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX512-NEXT: vpextrd $1, %xmm4, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vmovd %xmm3, %ecx
-; AVX512-NEXT: vmovd %xmm4, %esi
-; AVX512-NEXT: addl %ecx, %esi
-; AVX512-NEXT: cmovbl %eax, %esi
-; AVX512-NEXT: vmovd %esi, %xmm5
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX512-NEXT: vpextrd $2, %xmm4, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $3, %xmm3, %ecx
-; AVX512-NEXT: vpextrd $3, %xmm4, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm3
-; AVX512-NEXT: vpextrd $1, %xmm1, %ecx
-; AVX512-NEXT: vpextrd $1, %xmm0, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vmovd %xmm1, %ecx
-; AVX512-NEXT: vmovd %xmm0, %esi
-; AVX512-NEXT: addl %ecx, %esi
-; AVX512-NEXT: cmovbl %eax, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX512-NEXT: vpextrd $2, %xmm0, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX512-NEXT: vpextrd $3, %xmm0, %edx
-; AVX512-NEXT: addl %ecx, %edx
-; AVX512-NEXT: cmovbl %eax, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vmovdqa64 %zmm1, %zmm2
+; AVX512-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm2
+; AVX512-NEXT: vpminud %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%z = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %x, <16 x i32> %y)
  ret <16 x i32> %z
}
@@ -1790,73 +1088,100 @@ define <16 x i32> @v16i32(<16 x i32> %x,
define <2 x i64> @v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
; SSE2-LABEL: v2i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: addq %rax, %rcx
-; SSE2-NEXT: movq $-1, %rax
-; SSE2-NEXT: cmovbq %rax, %rcx
-; SSE2-NEXT: movq %rcx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm1, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm0
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159]
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v2i64:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movq %xmm1, %rax
-; SSSE3-NEXT: movq %xmm0, %rcx
-; SSSE3-NEXT: addq %rax, %rcx
-; SSSE3-NEXT: movq $-1, %rax
-; SSSE3-NEXT: cmovbq %rax, %rcx
-; SSSE3-NEXT: movq %rcx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSSE3-NEXT: movq %xmm1, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSSE3-NEXT: movq %xmm0, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm0
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: pxor %xmm0, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159]
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSSE3-NEXT: pxor %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pandn %xmm2, %xmm3
+; SSSE3-NEXT: por %xmm3, %xmm0
+; SSSE3-NEXT: paddq %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v2i64:
; SSE41: # %bb.0:
-; SSE41-NEXT: pextrq $1, %xmm1, %rax
-; SSE41-NEXT: pextrq $1, %xmm0, %rcx
-; SSE41-NEXT: addq %rax, %rcx
-; SSE41-NEXT: movq $-1, %rax
-; SSE41-NEXT: cmovbq %rax, %rcx
-; SSE41-NEXT: movq %rcx, %xmm2
-; SSE41-NEXT: movq %xmm1, %rcx
-; SSE41-NEXT: movq %xmm0, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm0
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372034707292159,9223372034707292159]
+; SSE41-NEXT: pxor %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm0
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT: paddq %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpextrq $1, %xmm1, %rax
-; AVX-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX-NEXT: addq %rax, %rcx
-; AVX-NEXT: movq $-1, %rax
-; AVX-NEXT: cmovbq %rax, %rcx
-; AVX-NEXT: vmovq %rcx, %xmm2
-; AVX-NEXT: vmovq %xmm1, %rcx
-; AVX-NEXT: vmovq %xmm0, %rdx
-; AVX-NEXT: addq %rcx, %rdx
-; AVX-NEXT: cmovbq %rax, %rdx
-; AVX-NEXT: vmovq %rdx, %xmm0
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm2
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm3
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm4
+; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovdqa %xmm1, %xmm2
+; AVX512-NEXT: vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT: vpminuq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
%z = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
ret <2 x i64> %z
}
@@ -1864,185 +1189,164 @@ define <2 x i64> @v2i64(<2 x i64> %x, <2
define <4 x i64> @v4i64(<4 x i64> %x, <4 x i64> %y) nounwind {
; SSE2-LABEL: v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: addq %rax, %rcx
-; SSE2-NEXT: movq $-1, %rax
-; SSE2-NEXT: cmovbq %rax, %rcx
-; SSE2-NEXT: movq %rcx, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: movq %xmm2, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm0
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
-; SSE2-NEXT: movq %xmm3, %rcx
-; SSE2-NEXT: movq %xmm1, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm0
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [9223372034707292159,9223372034707292159]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: pxor %xmm5, %xmm7
+; SSE2-NEXT: movdqa %xmm7, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm6, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm6, %xmm6
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: pandn %xmm4, %xmm7
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm8
+; SSE2-NEXT: pxor %xmm3, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pandn %xmm2, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: paddq %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v4i64:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movq %xmm2, %rax
-; SSSE3-NEXT: movq %xmm0, %rcx
-; SSSE3-NEXT: addq %rax, %rcx
-; SSSE3-NEXT: movq $-1, %rax
-; SSSE3-NEXT: cmovbq %rax, %rcx
-; SSSE3-NEXT: movq %rcx, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movq %xmm2, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSSE3-NEXT: movq %xmm0, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm0
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
-; SSSE3-NEXT: movq %xmm3, %rcx
-; SSSE3-NEXT: movq %xmm1, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSSE3-NEXT: movq %xmm0, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSSE3-NEXT: movq %xmm0, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm0
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSSE3-NEXT: movdqa %xmm4, %xmm0
-; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: movdqa %xmm0, %xmm6
+; SSSE3-NEXT: pxor %xmm8, %xmm6
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [9223372034707292159,9223372034707292159]
+; SSSE3-NEXT: movdqa %xmm2, %xmm7
+; SSSE3-NEXT: pxor %xmm5, %xmm7
+; SSSE3-NEXT: movdqa %xmm7, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm6, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSSE3-NEXT: pand %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm6, %xmm6
+; SSSE3-NEXT: pxor %xmm6, %xmm4
+; SSSE3-NEXT: movdqa %xmm2, %xmm7
+; SSSE3-NEXT: pandn %xmm4, %xmm7
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: paddq %xmm2, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm8
+; SSSE3-NEXT: pxor %xmm3, %xmm5
+; SSSE3-NEXT: movdqa %xmm5, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm6, %xmm2
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pandn %xmm2, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: paddq %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v4i64:
; SSE41: # %bb.0:
-; SSE41-NEXT: pextrq $1, %xmm2, %rax
-; SSE41-NEXT: pextrq $1, %xmm0, %rcx
-; SSE41-NEXT: addq %rax, %rcx
-; SSE41-NEXT: movq $-1, %rax
-; SSE41-NEXT: cmovbq %rax, %rcx
-; SSE41-NEXT: movq %rcx, %xmm4
-; SSE41-NEXT: movq %xmm2, %rcx
-; SSE41-NEXT: movq %xmm0, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm0
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE41-NEXT: pextrq $1, %xmm3, %rcx
-; SSE41-NEXT: pextrq $1, %xmm1, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm2
-; SSE41-NEXT: movq %xmm3, %rcx
-; SSE41-NEXT: movq %xmm1, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm1
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [9223372034707292159,9223372034707292159]
+; SSE41-NEXT: movdqa %xmm2, %xmm5
+; SSE41-NEXT: pxor %xmm4, %xmm5
+; SSE41-NEXT: movdqa %xmm5, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: pand %xmm9, %xmm0
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm5
+; SSE41-NEXT: movdqa %xmm2, %xmm6
+; SSE41-NEXT: pxor %xmm5, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm6
+; SSE41-NEXT: paddq %xmm2, %xmm6
+; SSE41-NEXT: pxor %xmm1, %xmm7
+; SSE41-NEXT: pxor %xmm3, %xmm4
+; SSE41-NEXT: movdqa %xmm4, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm7, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm7, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: pand %xmm8, %xmm0
+; SSE41-NEXT: por %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
+; SSE41-NEXT: paddq %xmm3, %xmm5
+; SSE41-NEXT: movdqa %xmm6, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpextrq $1, %xmm2, %rax
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX1-NEXT: addq %rax, %rcx
-; AVX1-NEXT: movq $-1, %rax
-; AVX1-NEXT: cmovbq %rax, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm4
-; AVX1-NEXT: vmovq %xmm2, %rcx
-; AVX1-NEXT: vmovq %xmm3, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm3
-; AVX1-NEXT: vmovq %xmm1, %rcx
-; AVX1-NEXT: vmovq %xmm0, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vcmptrueps %ymm4, %ymm4, %ymm4
+; AVX1-NEXT: vxorps %ymm4, %ymm1, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpxor %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm5
+; AVX1-NEXT: vxorps %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm4, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpextrq $1, %xmm2, %rax
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: movq $-1, %rax
-; AVX2-NEXT: cmovbq %rax, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm4
-; AVX2-NEXT: vmovq %xmm2, %rcx
-; AVX2-NEXT: vmovq %xmm3, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm3
-; AVX2-NEXT: vmovq %xmm1, %rcx
-; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807]
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm3
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm3
+; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX512-NEXT: addq %rax, %rcx
-; AVX512-NEXT: movq $-1, %rax
-; AVX512-NEXT: cmovbq %rax, %rcx
-; AVX512-NEXT: vmovq %rcx, %xmm4
-; AVX512-NEXT: vmovq %xmm2, %rcx
-; AVX512-NEXT: vmovq %xmm3, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm3
-; AVX512-NEXT: vmovq %xmm1, %rcx
-; AVX512-NEXT: vmovq %xmm0, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa %ymm1, %ymm2
+; AVX512-NEXT: vpternlogq $15, %ymm1, %ymm1, %ymm2
+; AVX512-NEXT: vpminuq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%z = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %x, <4 x i64> %y)
  ret <4 x i64> %z
}
@@ -2051,341 +1355,290 @@ define <4 x i64> @v4i64(<4 x i64> %x, <4
define <8 x i64> @v8i64(<8 x i64> %x, <8 x i64> %y) nounwind {
; SSE2-LABEL: v8i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: movq %xmm4, %rax
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: addq %rax, %rcx
-; SSE2-NEXT: movq $-1, %rax
-; SSE2-NEXT: cmovbq %rax, %rcx
-; SSE2-NEXT: movq %rcx, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm1, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: movq %xmm5, %rcx
-; SSE2-NEXT: movq %xmm8, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm4
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; SSE2-NEXT: movq %xmm6, %rcx
-; SSE2-NEXT: movq %xmm2, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1]
-; SSE2-NEXT: movq %xmm5, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: movq %xmm2, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
-; SSE2-NEXT: movq %xmm7, %rcx
-; SSE2-NEXT: movq %xmm3, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1]
-; SSE2-NEXT: movq %xmm2, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; SSE2-NEXT: movq %xmm2, %rdx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: cmovbq %rax, %rdx
-; SSE2-NEXT: movq %rdx, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0]
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm5, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm0, %xmm10
+; SSE2-NEXT: pxor %xmm8, %xmm10
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [9223372034707292159,9223372034707292159]
+; SSE2-NEXT: movdqa %xmm4, %xmm11
+; SSE2-NEXT: pxor %xmm9, %xmm11
+; SSE2-NEXT: movdqa %xmm11, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm10
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm12[1,1,3,3]
+; SSE2-NEXT: por %xmm10, %xmm11
+; SSE2-NEXT: pand %xmm11, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm10
+; SSE2-NEXT: pxor %xmm10, %xmm11
+; SSE2-NEXT: movdqa %xmm4, %xmm12
+; SSE2-NEXT: pandn %xmm11, %xmm12
+; SSE2-NEXT: por %xmm12, %xmm0
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm11
+; SSE2-NEXT: pxor %xmm8, %xmm11
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pxor %xmm9, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSE2-NEXT: por %xmm11, %xmm12
+; SSE2-NEXT: pand %xmm12, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm12
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pandn %xmm12, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: paddq %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: pxor %xmm9, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm12, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm11[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: pandn %xmm5, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm8
+; SSE2-NEXT: pxor %xmm7, %xmm9
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm9[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm10, %xmm4
+; SSE2-NEXT: movdqa %xmm7, %xmm5
+; SSE2-NEXT: pandn %xmm4, %xmm5
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: paddq %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: v8i64:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm1, %xmm8
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: movq %xmm4, %rax
-; SSSE3-NEXT: movq %xmm0, %rcx
-; SSSE3-NEXT: addq %rax, %rcx
-; SSSE3-NEXT: movq $-1, %rax
-; SSSE3-NEXT: cmovbq %rax, %rcx
-; SSSE3-NEXT: movq %rcx, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSSE3-NEXT: movq %xmm4, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSSE3-NEXT: movq %xmm1, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm1
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: movq %xmm5, %rcx
-; SSSE3-NEXT: movq %xmm8, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
-; SSSE3-NEXT: movq %xmm4, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1]
-; SSSE3-NEXT: movq %xmm4, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm4
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; SSSE3-NEXT: movq %xmm6, %rcx
-; SSSE3-NEXT: movq %xmm2, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,0,1]
-; SSSE3-NEXT: movq %xmm5, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSSE3-NEXT: movq %xmm2, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm2
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
-; SSSE3-NEXT: movq %xmm7, %rcx
-; SSSE3-NEXT: movq %xmm3, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm5
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,3,0,1]
-; SSSE3-NEXT: movq %xmm2, %rcx
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; SSSE3-NEXT: movq %xmm2, %rdx
-; SSSE3-NEXT: addq %rcx, %rdx
-; SSSE3-NEXT: cmovbq %rax, %rdx
-; SSSE3-NEXT: movq %rdx, %xmm2
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0]
-; SSSE3-NEXT: movdqa %xmm4, %xmm2
-; SSSE3-NEXT: movdqa %xmm5, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: movdqa %xmm0, %xmm10
+; SSSE3-NEXT: pxor %xmm8, %xmm10
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [9223372034707292159,9223372034707292159]
+; SSSE3-NEXT: movdqa %xmm4, %xmm11
+; SSSE3-NEXT: pxor %xmm9, %xmm11
+; SSSE3-NEXT: movdqa %xmm11, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm12
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm11[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm10
+; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm12[1,1,3,3]
+; SSSE3-NEXT: por %xmm10, %xmm11
+; SSSE3-NEXT: pand %xmm11, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm10
+; SSSE3-NEXT: pxor %xmm10, %xmm11
+; SSSE3-NEXT: movdqa %xmm4, %xmm12
+; SSSE3-NEXT: pandn %xmm11, %xmm12
+; SSSE3-NEXT: por %xmm12, %xmm0
+; SSSE3-NEXT: paddq %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm1, %xmm11
+; SSSE3-NEXT: pxor %xmm8, %xmm11
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pxor %xmm9, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm12
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSSE3-NEXT: por %xmm11, %xmm12
+; SSSE3-NEXT: pand %xmm12, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm12
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pandn %xmm12, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: paddq %xmm5, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pxor %xmm8, %xmm4
+; SSSE3-NEXT: movdqa %xmm6, %xmm5
+; SSSE3-NEXT: pxor %xmm9, %xmm5
+; SSSE3-NEXT: movdqa %xmm5, %xmm11
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm12, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm11[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pxor %xmm10, %xmm5
+; SSSE3-NEXT: movdqa %xmm6, %xmm4
+; SSSE3-NEXT: pandn %xmm5, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm2
+; SSSE3-NEXT: paddq %xmm6, %xmm2
+; SSSE3-NEXT: pxor %xmm3, %xmm8
+; SSSE3-NEXT: pxor %xmm7, %xmm9
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm9
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm9[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm3
+; SSSE3-NEXT: pxor %xmm10, %xmm4
+; SSSE3-NEXT: movdqa %xmm7, %xmm5
+; SSSE3-NEXT: pandn %xmm4, %xmm5
+; SSSE3-NEXT: por %xmm5, %xmm3
+; SSSE3-NEXT: paddq %xmm7, %xmm3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: v8i64:
; SSE41: # %bb.0:
-; SSE41-NEXT: pextrq $1, %xmm4, %rax
-; SSE41-NEXT: pextrq $1, %xmm0, %rcx
-; SSE41-NEXT: addq %rax, %rcx
-; SSE41-NEXT: movq $-1, %rax
-; SSE41-NEXT: cmovbq %rax, %rcx
-; SSE41-NEXT: movq %rcx, %xmm8
-; SSE41-NEXT: movq %xmm4, %rcx
-; SSE41-NEXT: movq %xmm0, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm0
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0]
-; SSE41-NEXT: pextrq $1, %xmm5, %rcx
-; SSE41-NEXT: pextrq $1, %xmm1, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm4
-; SSE41-NEXT: movq %xmm5, %rcx
-; SSE41-NEXT: movq %xmm1, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm1
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; SSE41-NEXT: pextrq $1, %xmm6, %rcx
-; SSE41-NEXT: pextrq $1, %xmm2, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm4
-; SSE41-NEXT: movq %xmm6, %rcx
-; SSE41-NEXT: movq %xmm2, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm2
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; SSE41-NEXT: pextrq $1, %xmm7, %rcx
-; SSE41-NEXT: pextrq $1, %xmm3, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm4
-; SSE41-NEXT: movq %xmm7, %rcx
-; SSE41-NEXT: movq %xmm3, %rdx
-; SSE41-NEXT: addq %rcx, %rdx
-; SSE41-NEXT: cmovbq %rax, %rdx
-; SSE41-NEXT: movq %rdx, %xmm3
-; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE41-NEXT: movdqa %xmm1, %xmm9
+; SSE41-NEXT: movdqa %xmm0, %xmm13
+; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: pxor %xmm11, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm12 = [9223372034707292159,9223372034707292159]
+; SSE41-NEXT: movdqa %xmm4, %xmm1
+; SSE41-NEXT: pxor %xmm12, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm8
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm8
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm8[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm0
+; SSE41-NEXT: por %xmm8, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm10
+; SSE41-NEXT: movdqa %xmm4, %xmm8
+; SSE41-NEXT: pxor %xmm10, %xmm8
+; SSE41-NEXT: blendvpd %xmm0, %xmm13, %xmm8
+; SSE41-NEXT: paddq %xmm4, %xmm8
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pxor %xmm11, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm1
+; SSE41-NEXT: pxor %xmm12, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: pand %xmm13, %xmm0
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm1
+; SSE41-NEXT: pxor %xmm10, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm1
+; SSE41-NEXT: paddq %xmm5, %xmm1
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm11, %xmm0
+; SSE41-NEXT: movdqa %xmm6, %xmm4
+; SSE41-NEXT: pxor %xmm12, %xmm4
+; SSE41-NEXT: movdqa %xmm4, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: pand %xmm9, %xmm0
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm6, %xmm4
+; SSE41-NEXT: pxor %xmm10, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
+; SSE41-NEXT: paddq %xmm6, %xmm4
+; SSE41-NEXT: pxor %xmm3, %xmm11
+; SSE41-NEXT: pxor %xmm7, %xmm12
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm11, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm12
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm0
+; SSE41-NEXT: por %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm10
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm10
+; SSE41-NEXT: paddq %xmm7, %xmm10
+; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm2
+; SSE41-NEXT: movdqa %xmm10, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vpextrq $1, %xmm4, %rax
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpextrq $1, %xmm5, %rcx
-; AVX1-NEXT: addq %rax, %rcx
-; AVX1-NEXT: movq $-1, %rax
-; AVX1-NEXT: cmovbq %rax, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm6
-; AVX1-NEXT: vmovq %xmm4, %rcx
-; AVX1-NEXT: vmovq %xmm5, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm4
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; AVX1-NEXT: vpextrq $1, %xmm2, %rcx
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm5
-; AVX1-NEXT: vmovq %xmm2, %rcx
-; AVX1-NEXT: vmovq %xmm0, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT: vpextrq $1, %xmm2, %rcx
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm5
-; AVX1-NEXT: vmovq %xmm2, %rcx
-; AVX1-NEXT: vmovq %xmm4, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; AVX1-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm4
-; AVX1-NEXT: vmovq %xmm3, %rcx
-; AVX1-NEXT: vmovq %xmm1, %rdx
-; AVX1-NEXT: addq %rcx, %rdx
-; AVX1-NEXT: cmovbq %rax, %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm4, %xmm5, %xmm5
+; AVX1-NEXT: vxorps %xmm6, %xmm6, %xmm6
+; AVX1-NEXT: vcmptrueps %ymm6, %ymm6, %ymm8
+; AVX1-NEXT: vxorps %ymm8, %ymm2, %ymm7
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm6
+; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm9
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT: vxorps %xmm4, %xmm7, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm5, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm7, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpaddq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vxorps %ymm8, %ymm3, %ymm5
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
+; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm6
+; AVX1-NEXT: vxorps %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
+; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm5, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2-NEXT: vpextrq $1, %xmm4, %rax
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT: vpextrq $1, %xmm5, %rcx
-; AVX2-NEXT: addq %rax, %rcx
-; AVX2-NEXT: movq $-1, %rax
-; AVX2-NEXT: cmovbq %rax, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm6
-; AVX2-NEXT: vmovq %xmm4, %rcx
-; AVX2-NEXT: vmovq %xmm5, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm4
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm5
-; AVX2-NEXT: vmovq %xmm2, %rcx
-; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
-; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm5
-; AVX2-NEXT: vmovq %xmm2, %rcx
-; AVX2-NEXT: vmovq %xmm4, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX2-NEXT: vpextrq $1, %xmm1, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm4
-; AVX2-NEXT: vmovq %xmm3, %rcx
-; AVX2-NEXT: vmovq %xmm1, %rdx
-; AVX2-NEXT: addq %rcx, %rdx
-; AVX2-NEXT: cmovbq %rax, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm5
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm6 = [9223372036854775807,9223372036854775807,9223372036854775807,9223372036854775807]
+; AVX2-NEXT: vpxor %ymm6, %ymm2, %ymm7
+; AVX2-NEXT: vpcmpgtq %ymm5, %ymm7, %ymm5
+; AVX2-NEXT: vpcmpeqd %ymm7, %ymm7, %ymm7
+; AVX2-NEXT: vpxor %ymm7, %ymm2, %ymm8
+; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm8, %ymm0
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm6, %ymm3, %ymm4
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpxor %ymm7, %ymm3, %ymm4
+; AVX2-NEXT: vblendvpd %ymm2, %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpaddq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: v8i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm3
-; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX512-NEXT: addq %rax, %rcx
-; AVX512-NEXT: movq $-1, %rax
-; AVX512-NEXT: cmovbq %rax, %rcx
-; AVX512-NEXT: vmovq %rcx, %xmm4
-; AVX512-NEXT: vmovq %xmm2, %rcx
-; AVX512-NEXT: vmovq %xmm3, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3
-; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm3, %rcx
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm3
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm3, %rcx
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm3
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vmovq %xmm1, %rcx
-; AVX512-NEXT: vmovq %xmm0, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: cmovbq %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT: vmovdqa64 %zmm1, %zmm2
+; AVX512-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm2
+; AVX512-NEXT: vpminuq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%z = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %x, <8 x i64> %y)
  ret <8 x i64> %z
}