[llvm] 2b46417 - [X86][SSE] Attempt to lower vec_reduce_add patterns with PSADBW for zero-extended vXi8 sources
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Feb 27 07:18:04 PST 2022
Author: Simon Pilgrim
Date: 2022-02-27T15:17:42Z
New Revision: 2b46417aa2d42d5d2a14df1675cfee547fd46556
URL: https://github.com/llvm/llvm-project/commit/2b46417aa2d42d5d2a14df1675cfee547fd46556
DIFF: https://github.com/llvm/llvm-project/commit/2b46417aa2d42d5d2a14df1675cfee547fd46556.diff
LOG: [X86][SSE] Attempt to lower vec_reduce_add patterns with PSADBW for zero-extended vXi8 sources
For i16/i32/i64 vectors, if the upper bits are known to be zero, then we can try to truncate to vXi8 (if it's worth it) and perform this as a PSADBW to add+zext each v8i8 subvector to an i64 sum, which we can then reduce together.
This addresses some of the PR42674 test cases where the source data was vXi8 but had been extended to match a wider unsigned integer accumulator.
Differential Revision: https://reviews.llvm.org/D120193
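For readers unfamiliar with PSADBW: against an all-zeros operand it reduces each aligned group of eight bytes to their zero-extended 64-bit sum (|b - 0| == b). A minimal scalar model of that semantic for one 128-bit lane (illustrative only, not part of the commit; the helper name is ours):

    #include <array>
    #include <cstdint>

    // Scalar model of PSADBW with a zero operand: each 8-byte half of
    // the 128-bit lane collapses to the 64-bit sum of its unsigned bytes.
    std::array<uint64_t, 2> psadbwVsZero(const std::array<uint8_t, 16> &Src) {
      std::array<uint64_t, 2> Result = {0, 0};
      for (int I = 0; I != 16; ++I)
        Result[I / 8] += Src[I]; // |Src[I] - 0| == Src[I]
      return Result;
    }

This is why the combine below only fires when computeKnownBits proves every element fits in 0-255: the truncation to vXi8 must be lossless for the byte sums to equal the original wider reduction.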
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/vector-reduce-add-mask.ll
llvm/test/CodeGen/X86/vector-reduce-add-zext.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1253e5ad73668..3fa92928e93d0 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -43009,10 +43009,11 @@ static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
SDLoc DL(ExtElt);
unsigned NumElts = VecVT.getVectorNumElements();
+ unsigned EltSizeInBits = VecVT.getScalarSizeInBits();
// Extend v4i8/v8i8 vector to v16i8, with undef upper 64-bits.
auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
- if (VecVT == MVT::v4i8) {
+ if (V.getValueType() == MVT::v4i8) {
if (ZeroExtend && Subtarget.hasSSE41()) {
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
DAG.getConstant(0, DL, MVT::v4i32),
@@ -43095,6 +43096,48 @@ static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
}
+ // See if we can use vXi8 PSADBW add reduction for larger zext types.
+ // If the source vector values are 0-255, then we can use PSADBW to
+ // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
+ // TODO: See if its worth avoiding vXi16/i32 truncations?
+ if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
+ DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
+ (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
+ Subtarget.hasAVX512())) {
+ EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
+ Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
+ if (ByteVT.getSizeInBits() < 128)
+ Rdx = WidenToV16I8(Rdx, true);
+
+ // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
+ auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+ ArrayRef<SDValue> Ops) {
+ MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
+ SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
+ return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
+ };
+ MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
+ Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);
+
+ // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
+ while (Rdx.getValueSizeInBits() > 128) {
+ SDValue Lo, Hi;
+ std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
+ VecVT = Lo.getValueType();
+ Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
+ }
+ assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");
+
+ if (NumElts > 8) {
+ SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
+ Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
+ }
+
+ VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
+ Rdx = DAG.getBitcast(VecVT, Rdx);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
+ }
+
// Only use (F)HADD opcodes if they aren't microcoded or minimizes codesize.
if (!shouldUseHorizontalOp(true, DAG, Subtarget))
return SDValue();
diff --git a/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll b/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll
index 27abcf095946d..c25ccb6b06e75 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-add-mask.ll
@@ -83,16 +83,27 @@ define i64 @test_v4i64_v4i16(<4 x i64> %a0) {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v4i64_v4i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: test_v4i64_v4i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v4i64_v4i16:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovqb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vmovq %xmm0, %rax
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
%1 = and <4 x i64> %a0, <i64 15, i64 31, i64 63, i64 127>
%2 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %1)
ret i64 %2
@@ -160,12 +171,9 @@ define i64 @test_v8i64_v8i8(<8 x i64> %a0) {
; AVX512-LABEL: test_v8i64_v8i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlq $60, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -265,14 +273,12 @@ define i64 @test_v16i64_v16i8(<16 x i64> %a0) {
;
; AVX512-LABEL: test_v16i64_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm2 = [1,1,1,1,1,1,1,1]
-; AVX512-NEXT: vpandq %zmm2, %zmm1, %zmm1
-; AVX512-NEXT: vpandq %zmm2, %zmm0, %zmm0
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpmovqb %zmm1, %xmm1
+; AVX512-NEXT: vpmovqb %zmm0, %xmm0
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rax
@@ -388,15 +394,23 @@ define i32 @test_v4i32(<4 x i32> %a0) {
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpsrld $31, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: test_v4i32:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsrld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vmovd %xmm0, %eax
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v4i32:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsrld $31, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vpmovdb %xmm0, %xmm0
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vmovd %xmm0, %eax
+; AVX512BWVL-NEXT: retq
%1 = lshr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
%2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)
ret i32 %2
@@ -464,18 +478,25 @@ define i32 @test_v8i32_v8i8(<8 x i32> %a0) {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v8i32_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: test_v8i32_v8i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vmovd %xmm0, %eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v8i32_v8i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovdb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vmovd %xmm0, %eax
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
%1 = and <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64>
%2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
ret i32 %2
@@ -569,15 +590,11 @@ define i32 @test_v16i32_v16i8(<16 x i32> %a0) {
;
; AVX512-LABEL: test_v16i32_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -710,18 +727,15 @@ define i32 @test_v32i32_v32i8(<32 x i32> %a0) {
;
; AVX512-LABEL: test_v32i32_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512-NEXT: vpandd %zmm2, %zmm1, %zmm1
-; AVX512-NEXT: vpandd %zmm2, %zmm0, %zmm0
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -894,13 +908,9 @@ define i16 @test_v8i16_v8i8(<8 x i16> %a0) {
; SSE2-LABEL: test_v8i16_v8i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: psadbw %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
@@ -908,65 +918,22 @@ define i16 @test_v8i16_v8i8(<8 x i16> %a0) {
; SSE41-LABEL: test_v8i16_v8i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: packuswb %xmm0, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: psadbw %xmm0, %xmm1
; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
-; AVX1-SLOW-LABEL: test_v8i16_v8i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v8i16_v8i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: retq
-;
-; AVX2-LABEL: test_v8i16_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i16_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
+; AVX-LABEL: test_v8i16_v8i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
%1 = and <8 x i16> %a0, <i16 0, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64>
%2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %1)
ret i16 %2
@@ -975,98 +942,90 @@ define i16 @test_v8i16_v8i8(<8 x i16> %a0) {
define i16 @test_v16i16_v16i8(<16 x i16> %a0) {
; SSE2-LABEL: test_v16i16_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,4,8,16,32,64]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: psadbw %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE2-NEXT: paddq %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i16_v16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,4,8,16,32,64]
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
; SSE41-NEXT: pand %xmm2, %xmm1
; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
+; SSE41-NEXT: packuswb %xmm1, %xmm0
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: psadbw %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE41-NEXT: paddq %xmm1, %xmm0
+; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
-; AVX1-SLOW-LABEL: test_v16i16_v16i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: vzeroupper
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v16i16_v16i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm1, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: vzeroupper
-; AVX1-FAST-NEXT: retq
+; AVX1-LABEL: test_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v16i16_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512BW-LABEL: test_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512BW-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vmovd %xmm0, %eax
+; AVX512BW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v16i16_v16i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512BWVL-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vmovd %xmm0, %eax
+; AVX512BWVL-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
%1 = and <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 0, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64>
%2 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %1)
ret i16 %2
@@ -1075,100 +1034,73 @@ define i16 @test_v16i16_v16i8(<16 x i16> %a0) {
define i16 @test_v32i16_v32i8(<32 x i16> %a0) {
; SSE2-LABEL: test_v32i16_v32i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: paddw %xmm3, %xmm1
-; SSE2-NEXT: paddw %xmm2, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: psadbw %xmm1, %xmm2
+; SSE2-NEXT: psadbw %xmm1, %xmm0
+; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: paddq %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i16_v32i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: psrlw $8, %xmm2
+; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: packuswb %xmm1, %xmm0
; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: paddw %xmm3, %xmm1
-; SSE41-NEXT: paddw %xmm2, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: psrlw $8, %xmm2
+; SSE41-NEXT: packuswb %xmm3, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: psadbw %xmm1, %xmm2
+; SSE41-NEXT: psadbw %xmm1, %xmm0
+; SSE41-NEXT: paddq %xmm2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE41-NEXT: paddq %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
-; AVX1-SLOW-LABEL: test_v32i16_v32i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpsrlw $8, %xmm1, %xmm2
-; AVX1-SLOW-NEXT: vpsrlw $8, %xmm0, %xmm3
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-SLOW-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpaddw %xmm0, %xmm2, %xmm0
-; AVX1-SLOW-NEXT: vpaddw %xmm0, %xmm3, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: vzeroupper
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v32i16_v32i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpsrlw $8, %xmm1, %xmm2
-; AVX1-FAST-NEXT: vpsrlw $8, %xmm0, %xmm3
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-FAST-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-FAST-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpaddw %xmm0, %xmm2, %xmm0
-; AVX1-FAST-NEXT: vpaddw %xmm0, %xmm3, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: vzeroupper
-; AVX1-FAST-NEXT: retq
+; AVX1-LABEL: test_v32i16_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsadbw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsadbw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
@@ -1177,16 +1109,13 @@ define i16 @test_v32i16_v32i8(<32 x i16> %a0) {
; AVX512-LABEL: test_v32i16_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlw $8, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
@@ -1200,136 +1129,110 @@ define i16 @test_v64i16_v64i8(<64 x i16> %a0) {
; SSE2-LABEL: test_v64i16_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [127,127,127,127,127,127,127,127]
-; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: pand %xmm8, %xmm1
-; SSE2-NEXT: pand %xmm8, %xmm7
-; SSE2-NEXT: pand %xmm8, %xmm3
-; SSE2-NEXT: paddw %xmm7, %xmm3
-; SSE2-NEXT: paddw %xmm5, %xmm3
-; SSE2-NEXT: paddw %xmm1, %xmm3
-; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm0
-; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: packuswb %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm3
; SSE2-NEXT: pand %xmm8, %xmm2
-; SSE2-NEXT: paddw %xmm6, %xmm2
-; SSE2-NEXT: paddw %xmm4, %xmm2
-; SSE2-NEXT: paddw %xmm3, %xmm2
-; SSE2-NEXT: paddw %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE2-NEXT: paddw %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm8, %xmm7
+; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: packuswb %xmm7, %xmm6
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: psadbw %xmm1, %xmm6
+; SSE2-NEXT: psadbw %xmm1, %xmm2
+; SSE2-NEXT: paddq %xmm6, %xmm2
+; SSE2-NEXT: psadbw %xmm1, %xmm4
+; SSE2-NEXT: paddq %xmm2, %xmm4
+; SSE2-NEXT: psadbw %xmm1, %xmm0
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT: paddq %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v64i16_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [127,127,127,127,127,127,127,127]
-; SSE41-NEXT: pand %xmm8, %xmm5
; SSE41-NEXT: pand %xmm8, %xmm1
-; SSE41-NEXT: pand %xmm8, %xmm7
-; SSE41-NEXT: pand %xmm8, %xmm3
-; SSE41-NEXT: paddw %xmm7, %xmm3
-; SSE41-NEXT: paddw %xmm5, %xmm3
-; SSE41-NEXT: paddw %xmm1, %xmm3
-; SSE41-NEXT: pand %xmm8, %xmm4
; SSE41-NEXT: pand %xmm8, %xmm0
-; SSE41-NEXT: pand %xmm8, %xmm6
+; SSE41-NEXT: packuswb %xmm1, %xmm0
+; SSE41-NEXT: pand %xmm8, %xmm5
+; SSE41-NEXT: pand %xmm8, %xmm4
+; SSE41-NEXT: packuswb %xmm5, %xmm4
+; SSE41-NEXT: pand %xmm8, %xmm3
; SSE41-NEXT: pand %xmm8, %xmm2
-; SSE41-NEXT: paddw %xmm6, %xmm2
-; SSE41-NEXT: paddw %xmm4, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE41-NEXT: paddw %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
+; SSE41-NEXT: packuswb %xmm3, %xmm2
+; SSE41-NEXT: pand %xmm8, %xmm7
+; SSE41-NEXT: pand %xmm8, %xmm6
+; SSE41-NEXT: packuswb %xmm7, %xmm6
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: psadbw %xmm1, %xmm6
+; SSE41-NEXT: psadbw %xmm1, %xmm2
+; SSE41-NEXT: paddq %xmm6, %xmm2
+; SSE41-NEXT: psadbw %xmm1, %xmm4
+; SSE41-NEXT: paddq %xmm2, %xmm4
+; SSE41-NEXT: psadbw %xmm1, %xmm0
+; SSE41-NEXT: paddq %xmm4, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE41-NEXT: paddq %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
; SSE41-NEXT: retq
;
-; AVX1-SLOW-LABEL: test_v64i16_v64i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vmovaps {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-SLOW-NEXT: vandps %ymm4, %ymm0, %ymm0
-; AVX1-SLOW-NEXT: vandps %ymm4, %ymm2, %ymm2
-; AVX1-SLOW-NEXT: vandps %ymm4, %ymm1, %ymm1
-; AVX1-SLOW-NEXT: vandps %ymm4, %ymm3, %ymm3
-; AVX1-SLOW-NEXT: vpaddw %xmm3, %xmm1, %xmm4
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm4, %xmm2, %xmm2
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm2, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: vzeroupper
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v64i16_v64i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vmovaps {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-FAST-NEXT: vandps %ymm4, %ymm0, %ymm0
-; AVX1-FAST-NEXT: vandps %ymm4, %ymm2, %ymm2
-; AVX1-FAST-NEXT: vandps %ymm4, %ymm1, %ymm1
-; AVX1-FAST-NEXT: vandps %ymm4, %ymm3, %ymm3
-; AVX1-FAST-NEXT: vpaddw %xmm3, %xmm1, %xmm4
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-FAST-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-FAST-NEXT: vpaddw %xmm4, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm2, %xmm1
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: vzeroupper
-; AVX1-FAST-NEXT: retq
+; AVX1-LABEL: test_v64i16_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm4, %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm4, %ymm3, %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpsadbw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsadbw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsadbw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsadbw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i16_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm1
+; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpsadbw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpsadbw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
@@ -1337,20 +1240,18 @@ define i16 @test_v64i16_v64i8(<64 x i16> %a0) {
;
; AVX512-LABEL: test_v64i16_v64i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512-NEXT: vpandq %zmm2, %zmm1, %zmm1
-; AVX512-NEXT: vpandq %zmm2, %zmm0, %zmm0
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-reduce-add-zext.ll b/llvm/test/CodeGen/X86/vector-reduce-add-zext.ll
index 3dc4702f216ca..89d299ff1eb45 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-add-zext.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-add-zext.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+fast-hops | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
@@ -104,230 +104,42 @@ define i64 @test_v4i64_v4i16(<4 x i16> %a0) {
}
define i64 @test_v8i64_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i64_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: paddq %xmm2, %xmm0
-; SSE2-NEXT: paddq %xmm3, %xmm0
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddq %xmm0, %xmm1
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i64_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrlq $48, %xmm3
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: paddq %xmm3, %xmm0
-; SSE41-NEXT: paddq %xmm2, %xmm0
-; SSE41-NEXT: paddq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddq %xmm0, %xmm1
-; SSE41-NEXT: movq %xmm1, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v8i64_v8i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
-; AVX1-NEXT: vpsrld $16, %xmm2, %xmm3
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v8i64_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i64_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; SSE-LABEL: test_v8i64_v8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psadbw %xmm0, %xmm1
+; SSE-NEXT: movq %xmm1, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8i64_v8i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
%1 = zext <8 x i8> %a0 to <8 x i64>
%2 = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %1)
ret i64 %2
}
define i64 @test_v16i64_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i64_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm3[2],xmm8[3],xmm3[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; SSE2-NEXT: paddq %xmm7, %xmm5
-; SSE2-NEXT: paddq %xmm8, %xmm5
-; SSE2-NEXT: paddq %xmm6, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: paddq %xmm1, %xmm0
-; SSE2-NEXT: paddq %xmm2, %xmm0
-; SSE2-NEXT: paddq %xmm5, %xmm0
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddq %xmm0, %xmm1
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i64_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrlq $48, %xmm3
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: paddq %xmm2, %xmm3
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,1,1]
-; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: paddq %xmm0, %xmm3
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: paddq %xmm0, %xmm3
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm4[0],zero,zero,zero,zero,zero,zero,zero,xmm4[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm5[0],zero,zero,zero,zero,zero,zero,zero,xmm5[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm6[0],zero,zero,zero,zero,zero,zero,zero,xmm6[1],zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT: paddq %xmm1, %xmm4
-; SSE41-NEXT: paddq %xmm0, %xmm4
-; SSE41-NEXT: paddq %xmm3, %xmm4
-; SSE41-NEXT: paddq %xmm2, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE41-NEXT: paddq %xmm4, %xmm0
-; SSE41-NEXT: movq %xmm0, %rax
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: test_v16i64_v16i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm5
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,zero,zero,zero,zero,xmm5[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[3,3,3,3]
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
-; AVX1-NEXT: vpaddq %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_v16i64_v16i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX2-NEXT: vpaddq %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX2-NEXT: vpaddq %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i64_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; SSE-LABEL: test_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psadbw %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v16i64_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
%1 = zext <16 x i8> %a0 to <16 x i64>
%2 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %1)
ret i64 %2
@@ -395,60 +207,41 @@ define i32 @test_v4i32(<4 x i8> %a0) {
; SSE2-LABEL: test_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddd %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; SSE41-NEXT: psadbw %xmm1, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
-; AVX1-SLOW-LABEL: test_v4i32:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v4i32:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: retq
+; AVX1-LABEL: test_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX2-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: retq
%1 = zext <4 x i8> %a0 to <4 x i32>
@@ -457,367 +250,92 @@ define i32 @test_v4i32(<4 x i8> %a0) {
}
define i32 @test_v8i32_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i32_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: paddd %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i32_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: paddd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddd %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-SLOW-LABEL: test_v8i32_v8i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v8i32_v8i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: retq
-;
-; AVX2-LABEL: test_v8i32_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i32_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; SSE-LABEL: test_v8i32_v8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psadbw %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8i32_v8i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: retq
%1 = zext <8 x i8> %a0 to <8 x i32>
%2 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %1)
ret i32 %2
}
define i32 @test_v16i32_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i32_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: paddd %xmm2, %xmm0
-; SSE2-NEXT: paddd %xmm3, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i32_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: paddd %xmm3, %xmm0
-; SSE41-NEXT: paddd %xmm2, %xmm0
-; SSE41-NEXT: paddd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddd %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-SLOW-LABEL: test_v16i32_v16i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpaddd %xmm3, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm2, %xmm0
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v16i32_v16i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpaddd %xmm3, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm2, %xmm0
-; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: retq
-;
-; AVX2-LABEL: test_v16i32_v16i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i32_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; SSE-LABEL: test_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psadbw %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v16i32_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: retq
%1 = zext <16 x i8> %a0 to <16 x i32>
%2 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1)
ret i32 %2
}
define i32 @test_v32i32_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i32_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm7, %xmm5
-; SSE2-NEXT: paddd %xmm8, %xmm5
-; SSE2-NEXT: paddd %xmm6, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: paddd %xmm2, %xmm0
-; SSE2-NEXT: paddd %xmm5, %xmm0
-; SSE2-NEXT: paddd %xmm3, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i32_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[3,3,3,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; SSE41-NEXT: paddd %xmm4, %xmm5
-; SSE41-NEXT: paddd %xmm2, %xmm5
-; SSE41-NEXT: paddd %xmm3, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; SSE41-NEXT: paddd %xmm2, %xmm3
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; SSE41-NEXT: paddd %xmm3, %xmm1
-; SSE41-NEXT: paddd %xmm5, %xmm1
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: paddd %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddd %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddd %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: retq
-;
-; AVX1-SLOW-LABEL: test_v32i32_v32i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[3,3,3,3]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm5, %xmm4
-; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm3, %xmm2
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpaddd %xmm3, %xmm4, %xmm3
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: vzeroupper
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v32i32_v32i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[3,3,3,3]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm5, %xmm4
-; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpaddd %xmm3, %xmm4, %xmm3
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: vzeroupper
-; AVX1-FAST-NEXT: retq
+; SSE-LABEL: test_v32i32_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: psadbw %xmm2, %xmm1
+; SSE-NEXT: psadbw %xmm2, %xmm0
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE-NEXT: paddq %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v32i32_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsadbw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsadbw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i32_v32i8:
; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i32_v32i8:
; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -966,291 +484,86 @@ define i16 @test_v4i16_v4i8(<4 x i8> %a0) {
}
define i16 @test_v8i16_v8i8(<8 x i8> %a0) {
-; SSE2-LABEL: test_v8i16_v8i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v8i16_v8i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-SLOW-LABEL: test_v8i16_v8i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v8i16_v8i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: retq
-;
-; AVX2-LABEL: test_v8i16_v8i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v8i16_v8i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: retq
+; SSE-LABEL: test_v8i16_v8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psadbw %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8i16_v8i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
%1 = zext <8 x i8> %a0 to <8 x i16>
%2 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %1)
ret i16 %2
}
define i16 @test_v16i16_v16i8(<16 x i8> %a0) {
-; SSE2-LABEL: test_v16i16_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: paddw %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v16i16_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE41-NEXT: paddw %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-SLOW-LABEL: test_v16i16_v16i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v16i16_v16i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm1, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: retq
-;
-; AVX2-LABEL: test_v16i16_v16i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_v16i16_v16i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; SSE-LABEL: test_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: psadbw %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v16i16_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsadbw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-NEXT: retq
%1 = zext <16 x i8> %a0 to <16 x i16>
%2 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %1)
ret i16 %2
}
define i16 @test_v32i16_v32i8(<32 x i8> %a0) {
-; SSE2-LABEL: test_v32i16_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: paddw %xmm3, %xmm0
-; SSE2-NEXT: paddw %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $16, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v32i16_v32i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: paddw %xmm3, %xmm0
-; SSE41-NEXT: paddw %xmm4, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movd %xmm1, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-SLOW-LABEL: test_v32i16_v32i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-SLOW-NEXT: vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-SLOW-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: vzeroupper
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v32i16_v32i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-FAST-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-FAST-NEXT: vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FAST-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: vzeroupper
-; AVX1-FAST-NEXT: retq
+; SSE-LABEL: test_v32i16_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: psadbw %xmm2, %xmm1
+; SSE-NEXT: psadbw %xmm2, %xmm0
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE-NEXT: paddq %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v32i16_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsadbw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsadbw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16_v32i8:
; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
@@ -1258,17 +571,12 @@ define i16 @test_v32i16_v32i8(<32 x i8> %a0) {
;
; AVX512-LABEL: test_v32i16_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
@@ -1279,148 +587,51 @@ define i16 @test_v32i16_v32i8(<32 x i8> %a0) {
}
define i16 @test_v64i16_v64i8(<64 x i8> %a0) {
-; SSE2-LABEL: test_v64i16_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
-; SSE2-NEXT: paddw %xmm7, %xmm4
-; SSE2-NEXT: paddw %xmm5, %xmm4
-; SSE2-NEXT: paddw %xmm6, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE2-NEXT: paddw %xmm3, %xmm1
-; SSE2-NEXT: paddw %xmm2, %xmm1
-; SSE2-NEXT: paddw %xmm4, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE2-NEXT: paddw %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: test_v64i16_v64i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
-; SSE41-NEXT: paddw %xmm3, %xmm1
-; SSE41-NEXT: paddw %xmm2, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: paddw %xmm7, %xmm6
-; SSE41-NEXT: paddw %xmm5, %xmm6
-; SSE41-NEXT: paddw %xmm1, %xmm6
-; SSE41-NEXT: paddw %xmm8, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE41-NEXT: paddw %xmm6, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE41-NEXT: paddw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE41-NEXT: retq
-;
-; AVX1-SLOW-LABEL: test_v64i16_v64i8:
-; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
-; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm7
-; AVX1-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm7[8],xmm2[8],xmm7[9],xmm2[9],xmm7[10],xmm2[10],xmm7[11],xmm2[11],xmm7[12],xmm2[12],xmm7[13],xmm2[13],xmm7[14],xmm2[14],xmm7[15],xmm2[15]
-; AVX1-SLOW-NEXT: vpaddw %xmm6, %xmm2, %xmm2
-; AVX1-SLOW-NEXT: vpaddw %xmm2, %xmm3, %xmm2
-; AVX1-SLOW-NEXT: vpaddw %xmm2, %xmm4, %xmm2
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-SLOW-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; AVX1-SLOW-NEXT: vpaddw %xmm3, %xmm4, %xmm3
-; AVX1-SLOW-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-SLOW-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
-; AVX1-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-SLOW-NEXT: vzeroupper
-; AVX1-SLOW-NEXT: retq
-;
-; AVX1-FAST-LABEL: test_v64i16_v64i8:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-FAST-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-FAST-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
-; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm7
-; AVX1-FAST-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm7[8],xmm2[8],xmm7[9],xmm2[9],xmm7[10],xmm2[10],xmm7[11],xmm2[11],xmm7[12],xmm2[12],xmm7[13],xmm2[13],xmm7[14],xmm2[14],xmm7[15],xmm2[15]
-; AVX1-FAST-NEXT: vpaddw %xmm6, %xmm2, %xmm2
-; AVX1-FAST-NEXT: vpaddw %xmm2, %xmm3, %xmm2
-; AVX1-FAST-NEXT: vpaddw %xmm2, %xmm4, %xmm2
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-FAST-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; AVX1-FAST-NEXT: vpaddw %xmm3, %xmm4, %xmm3
-; AVX1-FAST-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-FAST-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vmovd %xmm0, %eax
-; AVX1-FAST-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX1-FAST-NEXT: vzeroupper
-; AVX1-FAST-NEXT: retq
+; SSE-LABEL: test_v64i16_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: psadbw %xmm4, %xmm3
+; SSE-NEXT: psadbw %xmm4, %xmm1
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: psadbw %xmm4, %xmm2
+; SSE-NEXT: paddq %xmm1, %xmm2
+; SSE-NEXT: psadbw %xmm4, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE-NEXT: paddq %xmm0, %xmm1
+; SSE-NEXT: movd %xmm1, %eax
+; SSE-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_v64i16_v64i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsadbw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpsadbw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsadbw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsadbw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i16_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpaddw %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpsadbw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsadbw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper
@@ -1428,20 +639,14 @@ define i16 @test_v64i16_v64i8(<64 x i8> %a0) {
;
; AVX512-LABEL: test_v64i16_v64i8:
; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpsadbw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: vzeroupper
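For context, a minimal sketch (an illustrative assumption, not part of the
commit) of the kind of source code PR42674 describes: a byte-sum loop whose
accumulator is a wider unsigned integer. The vectorizer turns such a loop
into the zext-plus-llvm.vector.reduce.add pattern exercised by the tests
above, which this combine can now lower through PSADBW:

/* Hypothetical C reproducer -- the function name is made up for
   illustration. Each input byte is 0-255, so summing eight of them
   into a PSADBW 64-bit lane cannot overflow. */
#include <stddef.h>
#include <stdint.h>

uint32_t sum_bytes(const uint8_t *p, size_t n) {
  uint32_t sum = 0;
  for (size_t i = 0; i < n; ++i)
    sum += p[i]; /* each i8 element is zero-extended to i32 before the add */
  return sum;
}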