[llvm] r306289 - [x86] transform vector inc/dec to use -1 constant (PR33483)
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 26 07:19:26 PDT 2017
Author: spatel
Date: Mon Jun 26 07:19:26 2017
New Revision: 306289
URL: http://llvm.org/viewvc/llvm-project?rev=306289&view=rev
Log:
[x86] transform vector inc/dec to use -1 constant (PR33483)
Convert vector increment or decrement to sub/add with an all-ones constant:
add X, <1, 1...> --> sub X, <-1, -1...>
sub X, <1, 1...> --> add X, <-1, -1...>
The all-ones vector constant can be materialized using a pcmpeq instruction that is
commonly recognized as a dependency-breaking idiom (it has no dependency on the input
register), so that's better than loading a splat 1 constant.
AVX512 uses 'vpternlogd' for 512-bit vectors because there is apparently no better
way to produce 512 one-bits.
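For reference, these are the all-ones idioms in isolation (a hand-written sketch, not
copied from the diffs below; the register choices are arbitrary):

  pcmpeqd %xmm0, %xmm0                  ; SSE/AVX: xmm0 = <-1, -1, -1, -1>
  vpternlogd $255, %zmm0, %zmm0, %zmm0  ; AVX512: zmm0 = all-ones (imm 0xff is the
                                        ; constant-1 ternary logic function)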
The general advantages of this lowering are:
1. pcmpeq has lower latency than a memop on every uarch I looked at in Agner's tables,
so in theory, this could be better for perf, but...
2. That seems unlikely to affect any OOO implementation, and I can't measure any real
perf difference from this transform on Haswell or Jaguar, but...
3. It doesn't look like it from the diffs, but this is an overall size win because we
eliminate 16-64 constant bytes in the case of a vector load (see the sketch after this
list). If we're broadcasting
a scalar load (which might itself be a bug), then we're replacing a scalar constant
load + broadcast with a single cheap op, so that should always be smaller/better too.
4. This makes the DAG/isel output more consistent - we use pcmpeq already for padd x, -1
and psub x, -1, so we should use that form for +1 too because we can. If there's some
reason to favor a constant load on some CPU, let's make the reverse transform for all
of these cases (either here in the DAG or in a later machine pass).
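To make the size win concrete, here is a minimal sketch (a hypothetical v4i32
increment; the asm is illustrative, not taken verbatim from the tests below):

  define <4 x i32> @inc(<4 x i32> %x) {
    %r = add <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
    ret <4 x i32> %r
  }

  ; before: load <1,1,1,1> from the constant pool (16 bytes of data + a memop)
  ;   paddd .LCPI0_0(%rip), %xmm0
  ; after: materialize all-ones with no load and subtract, since x - (-1) == x + 1
  ;   pcmpeqd %xmm1, %xmm1
  ;   psubd %xmm1, %xmm0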
This should fix:
https://bugs.llvm.org/show_bug.cgi?id=33483
Differential Revision: https://reviews.llvm.org/D34336
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/avg.ll
llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
llvm/trunk/test/CodeGen/X86/avx-logic.ll
llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll
llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
llvm/trunk/test/CodeGen/X86/avx2-logic.ll
llvm/trunk/test/CodeGen/X86/select.ll
llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
llvm/trunk/test/CodeGen/X86/vec_ctbits.ll
llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll
llvm/trunk/test/CodeGen/X86/widen_arith-1.ll
llvm/trunk/test/CodeGen/X86/widen_arith-2.ll
llvm/trunk/test/CodeGen/X86/widen_arith-3.ll
llvm/trunk/test/CodeGen/X86/widen_cast-2.ll
llvm/trunk/test/CodeGen/X86/widen_cast-3.ll
llvm/trunk/test/CodeGen/X86/widen_cast-4.ll
llvm/trunk/test/CodeGen/X86/widen_conv-1.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Jun 26 07:19:26 2017
@@ -35065,6 +35065,32 @@ static SDValue combineLoopSADPattern(SDN
return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi);
}
+/// Convert vector increment or decrement to sub/add with an all-ones constant:
+/// add X, <1, 1...> --> sub X, <-1, -1...>
+/// sub X, <1, 1...> --> add X, <-1, -1...>
+/// The all-ones vector constant can be materialized using a pcmpeq instruction
+/// that is commonly recognized as an idiom (has no register dependency), so
+/// that's better/smaller than loading a splat 1 constant.
+static SDValue combineIncDecVector(SDNode *N, SelectionDAG &DAG) {
+ assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
+ "Unexpected opcode for increment/decrement transform");
+
+ // Pseudo-legality check: getOnesVector() expects one of these types, so bail
+ // out and wait for legalization if we have an unsupported vector length.
+ EVT VT = N->getValueType(0);
+ if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
+ return SDValue();
+
+ SDNode *N1 = N->getOperand(1).getNode();
+ APInt SplatVal;
+ if (!ISD::isConstantSplatVector(N1, SplatVal) || !SplatVal.isOneValue())
+ return SDValue();
+
+ SDValue AllOnesVec = getOnesVector(VT, DAG, SDLoc(N));
+ unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
+ return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec);
+}
+
static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
const SDNodeFlags Flags = N->getFlags();
@@ -35084,6 +35110,9 @@ static SDValue combineAdd(SDNode *N, Sel
isHorizontalBinOp(Op0, Op1, true))
return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
+ if (SDValue V = combineIncDecVector(N, DAG))
+ return V;
+
return combineAddOrSubToADCOrSBB(N, DAG);
}
@@ -35117,6 +35146,9 @@ static SDValue combineSub(SDNode *N, Sel
isHorizontalBinOp(Op0, Op1, false))
return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
+ if (SDValue V = combineIncDecVector(N, DAG))
+ return V;
+
return combineAddOrSubToADCOrSBB(N, DAG);
}
Modified: llvm/trunk/test/CodeGen/X86/avg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avg.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avg.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avg.ll Mon Jun 26 07:19:26 2017
@@ -90,90 +90,89 @@ define void @avg_v16i8(<16 x i8>* %a, <1
define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-LABEL: avg_v32i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm9
-; SSE2-NEXT: movdqa 16(%rdi), %xmm12
-; SSE2-NEXT: movdqa (%rsi), %xmm4
+; SSE2-NEXT: movdqa (%rdi), %xmm3
+; SSE2-NEXT: movdqa 16(%rdi), %xmm8
+; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm9, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm9, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm12, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm10, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm12, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm15, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd %xmm11, %xmm3
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm14, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: paddd %xmm9, %xmm4
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: paddd %xmm13, %xmm8
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm10, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm12, %xmm1
-; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm8, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm6, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm10, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm8, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm9
+; SSE2-NEXT: psubd %xmm4, %xmm2
+; SSE2-NEXT: psubd %xmm4, %xmm5
+; SSE2-NEXT: psubd %xmm4, %xmm0
+; SSE2-NEXT: psubd %xmm4, %xmm6
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: psubd %xmm4, %xmm7
+; SSE2-NEXT: psubd %xmm4, %xmm1
+; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: psrld $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm0, %xmm7
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: packuswb %xmm7, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm4
-; SSE2-NEXT: packuswb %xmm5, %xmm4
-; SSE2-NEXT: packuswb %xmm3, %xmm4
; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm8
-; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm8, %xmm2
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm6, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm9
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm9, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm7, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm4, (%rax)
+; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i8:
@@ -184,57 +183,57 @@ define void @avg_v32i8(<32 x i8>* %a, <3
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm9
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm9
+; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm2, %xmm6, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm9, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm9, %xmm8
+; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm7, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm9
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm0, %xmm7, %xmm7
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpand %xmm0, %xmm4, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm5, %xmm3
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm6, %xmm3
+; AVX1-NEXT: vpand %xmm0, %xmm9, %xmm0
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -269,323 +268,329 @@ define void @avg_v32i8(<32 x i8>* %a, <3
define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-LABEL: avg_v64i8:
; SSE2: # BB#0:
-; SSE2-NEXT: subq $152, %rsp
-; SSE2-NEXT: .Lcfi0:
-; SSE2-NEXT: .cfi_def_cfa_offset 160
-; SSE2-NEXT: movdqa (%rdi), %xmm4
-; SSE2-NEXT: movdqa 16(%rdi), %xmm3
-; SSE2-NEXT: movdqa 32(%rdi), %xmm2
-; SSE2-NEXT: movdqa 48(%rdi), %xmm1
+; SSE2-NEXT: movdqa (%rdi), %xmm6
+; SSE2-NEXT: movdqa 16(%rdi), %xmm2
+; SSE2-NEXT: movdqa 32(%rdi), %xmm1
+; SSE2-NEXT: movdqa 48(%rdi), %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsi), %xmm5
+; SSE2-NEXT: movdqa 16(%rsi), %xmm13
+; SSE2-NEXT: movdqa 32(%rsi), %xmm11
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm6, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm10, %xmm3
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm4, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm5, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm12, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm6, %xmm5
+; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm13, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm14, %xmm12
+; SSE2-NEXT: movdqa %xmm7, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm15, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm13, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm8, %xmm15
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm2, %xmm13
+; SSE2-NEXT: movdqa %xmm11, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm5, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa (%rsi), %xmm10
-; SSE2-NEXT: movdqa %xmm10, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm4, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3],xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm10, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: movdqa 16(%rsi), %xmm15
-; SSE2-NEXT: movdqa %xmm15, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm14
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm7, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm14
; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm15, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm8, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm13
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm2, %xmm14
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm1, %xmm11
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa 48(%rsi), %xmm7
+; SSE2-NEXT: movdqa %xmm7, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm1, %xmm8
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa 48(%rsi), %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm9, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm7, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm11
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm11 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm12
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm12 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm10
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm10 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm14
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm14 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd (%rsp), %xmm6 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm15 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm13
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm13, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm2 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm9
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
-; SSE2-NEXT: movdqa %xmm3, %xmm13
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm11
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm0, %xmm11
-; SSE2-NEXT: pand %xmm0, %xmm4
-; SSE2-NEXT: packuswb %xmm11, %xmm4
+; SSE2-NEXT: paddd %xmm1, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm2, %xmm7
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psubd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: psubd %xmm0, %xmm10
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psubd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: psubd %xmm0, %xmm12
+; SSE2-NEXT: psubd %xmm0, %xmm4
+; SSE2-NEXT: psubd %xmm0, %xmm15
+; SSE2-NEXT: psubd %xmm0, %xmm13
+; SSE2-NEXT: psubd %xmm0, %xmm9
+; SSE2-NEXT: psubd %xmm0, %xmm6
+; SSE2-NEXT: psubd %xmm0, %xmm14
+; SSE2-NEXT: psubd %xmm0, %xmm11
+; SSE2-NEXT: psubd %xmm0, %xmm8
+; SSE2-NEXT: psubd %xmm0, %xmm3
+; SSE2-NEXT: psubd %xmm0, %xmm5
+; SSE2-NEXT: psubd %xmm0, %xmm7
; SSE2-NEXT: psrld $1, %xmm10
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm10
+; SSE2-NEXT: packuswb %xmm1, %xmm10
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: psrld $1, %xmm12
; SSE2-NEXT: pand %xmm0, %xmm12
-; SSE2-NEXT: pand %xmm0, %xmm10
-; SSE2-NEXT: packuswb %xmm12, %xmm10
-; SSE2-NEXT: packuswb %xmm4, %xmm10
-; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: packuswb %xmm12, %xmm4
+; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: psrld $1, %xmm15
+; SSE2-NEXT: pand %xmm0, %xmm15
+; SSE2-NEXT: pand %xmm0, %xmm13
+; SSE2-NEXT: packuswb %xmm15, %xmm13
+; SSE2-NEXT: packuswb %xmm4, %xmm13
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: pand %xmm0, %xmm9
+; SSE2-NEXT: pand %xmm0, %xmm6
+; SSE2-NEXT: packuswb %xmm9, %xmm6
+; SSE2-NEXT: psrld $1, %xmm11
; SSE2-NEXT: psrld $1, %xmm14
; SSE2-NEXT: pand %xmm0, %xmm14
-; SSE2-NEXT: pand %xmm0, %xmm7
-; SSE2-NEXT: packuswb %xmm14, %xmm7
-; SSE2-NEXT: psrld $1, %xmm15
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: pand %xmm0, %xmm11
+; SSE2-NEXT: packuswb %xmm14, %xmm11
+; SSE2-NEXT: packuswb %xmm6, %xmm11
; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm15
-; SSE2-NEXT: packuswb %xmm3, %xmm15
-; SSE2-NEXT: packuswb %xmm7, %xmm15
; SSE2-NEXT: psrld $1, %xmm8
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: pand %xmm0, %xmm4
; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: packuswb %xmm4, %xmm8
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: movdqa %xmm6, %xmm3
-; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm3, %xmm2
-; SSE2-NEXT: packuswb %xmm8, %xmm2
-; SSE2-NEXT: psrld $1, %xmm9
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: pand %xmm0, %xmm4
-; SSE2-NEXT: pand %xmm0, %xmm9
-; SSE2-NEXT: packuswb %xmm4, %xmm9
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm13
-; SSE2-NEXT: pand %xmm0, %xmm13
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm13, %xmm1
-; SSE2-NEXT: packuswb %xmm9, %xmm1
+; SSE2-NEXT: packuswb %xmm8, %xmm3
+; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: packuswb %xmm5, %xmm7
+; SSE2-NEXT: packuswb %xmm3, %xmm7
+; SSE2-NEXT: movdqu %xmm7, (%rax)
+; SSE2-NEXT: movdqu %xmm11, (%rax)
+; SSE2-NEXT: movdqu %xmm13, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm2, (%rax)
-; SSE2-NEXT: movdqu %xmm15, (%rax)
-; SSE2-NEXT: movdqu %xmm10, (%rax)
-; SSE2-NEXT: addq $152, %rsp
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8:
; AVX1: # BB#0:
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: subq $24, %rsp
+; AVX1-NEXT: .Lcfi0:
+; AVX1-NEXT: .cfi_def_cfa_offset 32
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vmovdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vmovdqa %xmm7, (%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vmovdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vmovdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm5, %xmm14
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm7, %xmm7
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vmovdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm9
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm11, %xmm11
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm15, %xmm15
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm6, %xmm6
+; AVX1-NEXT: vpaddd %xmm4, %xmm5, %xmm13
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm8, %xmm2
-; AVX1-NEXT: vmovdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpaddd %xmm4, %xmm6, %xmm12
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm8
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm12, %xmm12
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm13, %xmm4
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm1, %xmm13 # 16-byte Folded Reload
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX1-NEXT: vpaddd %xmm4, %xmm15, %xmm11
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm0, %xmm8, %xmm10
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm9, %xmm8
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm10
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm0
+; AVX1-NEXT: vpaddd %xmm2, %xmm14, %xmm9
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm3, %xmm4 # 16-byte Folded Reload
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd (%rsp), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm5, %xmm3 # 16-byte Folded Reload
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd -{{[0-9]+}}(%rsp), %xmm5, %xmm2 # 16-byte Folded Reload
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm6, %xmm5, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm14
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vmovdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vmovdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm15
+; AVX1-NEXT: vmovdqa %xmm15, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsubd %xmm0, %xmm13, %xmm13
+; AVX1-NEXT: vpsubd %xmm0, %xmm12, %xmm12
+; AVX1-NEXT: vpsubd %xmm0, %xmm11, %xmm11
+; AVX1-NEXT: vpsubd %xmm0, %xmm10, %xmm10
+; AVX1-NEXT: vpsubd %xmm0, %xmm8, %xmm8
+; AVX1-NEXT: vpsubd %xmm0, %xmm9, %xmm9
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm0, %xmm7, %xmm7
+; AVX1-NEXT: vmovdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1
; AVX1-NEXT: vpsrld $1, %xmm14, %xmm14
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vpand %xmm7, %xmm14, %xmm14
-; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm14, %xmm0, %xmm14
-; AVX1-NEXT: vpsrld $1, %xmm9, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm5, %xmm14, %xmm14
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm14, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm6
; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm14, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm11, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm6, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm15, %xmm6
-; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm13, %xmm2
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm11, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm12, %xmm7
+; AVX1-NEXT: vpand %xmm5, %xmm7, %xmm7
+; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpsrld $1, %xmm8, %xmm2
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm10, %xmm6
+; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm9, %xmm6
+; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm12, %xmm4
-; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm13, %xmm4
-; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm10, %xmm4
-; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
-; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: vmovups %ymm1, (%rax)
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vmovups %ymm1, (%rax)
+; AVX1-NEXT: addq $24, %rsp
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
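The AVX1 churn above boils down to a two-instruction substitution. A minimal sketch of the rewrite (the .LCPI0_0 label is a hypothetical stand-in for the old splat-1 constant-pool slot):

    # before: load splat-1 from the constant pool, then add
    vmovdqa .LCPI0_0(%rip), %xmm0   # xmm0 = [1,1,1,1] (hypothetical label)
    vpaddd  %xmm0, %xmm1, %xmm1     # x + 1

    # after: materialize all-ones in a register, then subtract
    vpcmpeqd %xmm0, %xmm0, %xmm0    # xmm0 = [-1,-1,-1,-1], no memory access
    vpsubd   %xmm0, %xmm1, %xmm1    # x - (-1) == x + 1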
@@ -600,70 +605,70 @@ define void @avg_v64i8(<64 x i8>* %a, <6
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm9
-; AVX2-NEXT: vpaddd %ymm9, %ymm8, %ymm8
-; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm8
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm9, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm10
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm9, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm11
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm9, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm3, %ymm12
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm9, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm4, %ymm2
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm9, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm4
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm9, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm6, %ymm13
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm9, %ymm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm7, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm10, %ymm6
-; AVX2-NEXT: vpsrld $1, %ymm8, %ymm5
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT: vpshufb %ymm3, %ymm5, %ymm5
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm5[0,2,2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm5, %xmm7, %xmm7
-; AVX2-NEXT: vpshufb %ymm3, %ymm6, %ymm6
-; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm5, %xmm6, %xmm6
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
-; AVX2-NEXT: vpsrld $1, %ymm12, %ymm7
-; AVX2-NEXT: vpsrld $1, %ymm11, %ymm8
-; AVX2-NEXT: vpshufb %ymm3, %ymm8, %ymm8
-; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm5, %xmm8, %xmm0
-; AVX2-NEXT: vpshufb %ymm3, %ymm7, %ymm7
-; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm5, %xmm7, %xmm7
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm7[0],xmm0[0]
-; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm0
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm1
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm3, %ymm3
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm5
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm6
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm7
+; AVX2-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8
+; AVX2-NEXT: vpsubd %ymm8, %ymm0, %ymm9
+; AVX2-NEXT: vpsubd %ymm8, %ymm1, %ymm10
+; AVX2-NEXT: vpsubd %ymm8, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm8, %ymm3, %ymm3
+; AVX2-NEXT: vpsubd %ymm8, %ymm4, %ymm4
+; AVX2-NEXT: vpsubd %ymm8, %ymm5, %ymm5
+; AVX2-NEXT: vpsubd %ymm8, %ymm6, %ymm1
+; AVX2-NEXT: vpsubd %ymm8, %ymm7, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm11
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm12
+; AVX2-NEXT: vpsrld $1, %ymm5, %ymm5
; AVX2-NEXT: vpsrld $1, %ymm4, %ymm4
-; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
-; AVX2-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2-NEXT: vpshufb %ymm3, %ymm4, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
-; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm13, %ymm4
-; AVX2-NEXT: vpshufb %ymm3, %ymm4, %ymm4
-; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm4[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2-NEXT: vpsrld $1, %ymm3, %ymm6
+; AVX2-NEXT: vpsrld $1, %ymm2, %ymm7
+; AVX2-NEXT: vpsrld $1, %ymm10, %ymm8
+; AVX2-NEXT: vpsrld $1, %ymm9, %ymm3
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm3, %ymm3
+; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm3[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm9, %xmm0
+; AVX2-NEXT: vpshufb %ymm2, %ymm8, %ymm8
+; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm8, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vpshufb %ymm2, %ymm7, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm6, %ymm6
+; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm6, %xmm6
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm6[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpshufb %ymm2, %ymm4, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm5, %ymm4
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm4
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
+; AVX2-NEXT: vpshufb %ymm2, %ymm12, %ymm4
+; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm4
+; AVX2-NEXT: vpshufb %ymm2, %ymm11, %ymm2
+; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX2-NEXT: vmovdqu %ymm1, (%rax)
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
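The AVX2 checks show the same substitution at ymm width. vpcmpeqd with identical source and destination makes every lane compare equal, so every bit of the result is set; because the output is independent of the register's prior contents, this is one of the idioms CPUs recognize as having no input dependency. The pairing, in a minimal sketch:

    vpcmpeqd %ymm8, %ymm8, %ymm8   # ymm8 = -1 in every dword lane, dependency-free idiom
    vpsubd   %ymm8, %ymm0, %ymm0   # x - (-1) == x + 1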
@@ -676,18 +681,18 @@ define void @avg_v64i8(<64 x i8>* %a, <6
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm8
-; AVX512F-NEXT: vpaddd %zmm8, %zmm4, %zmm4
; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0
-; AVX512F-NEXT: vpaddd %zmm8, %zmm5, %zmm4
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
-; AVX512F-NEXT: vpaddd %zmm8, %zmm6, %zmm4
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2
-; AVX512F-NEXT: vpaddd %zmm8, %zmm7, %zmm4
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512F-NEXT: vpsubd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT: vpsubd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT: vpsubd %zmm4, %zmm2, %zmm2
+; AVX512F-NEXT: vpsubd %zmm4, %zmm3, %zmm3
; AVX512F-NEXT: vpsrld $1, %zmm3, %zmm3
; AVX512F-NEXT: vpsrld $1, %zmm2, %zmm2
; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1
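At zmm width there is no vector-destination vpcmpeqd to reuse (the AVX512 compares write a mask register), so the all-ones value comes from vpternlogd with immediate 255: the immediate is the full truth table for the three inputs, and 0xFF maps every input combination to 1. Sketch of the idiom:

    vpternlogd $255, %zmm4, %zmm4, %zmm4   # truth table 0xFF: every result bit set
    vpsubd    %zmm4, %zmm0, %zmm0          # x - (-1) == x + 1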
@@ -779,32 +784,32 @@ define void @avg_v8i16(<8 x i16>* %a, <8
define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm5
+; SSE2-NEXT: movdqa (%rdi), %xmm2
; SSE2-NEXT: movdqa 16(%rdi), %xmm4
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [1,1,1,1]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: paddd %xmm7, %xmm3
-; SSE2-NEXT: paddd %xmm6, %xmm0
-; SSE2-NEXT: paddd %xmm5, %xmm0
-; SSE2-NEXT: paddd %xmm6, %xmm2
-; SSE2-NEXT: paddd %xmm8, %xmm2
-; SSE2-NEXT: paddd %xmm6, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: psubd %xmm4, %xmm0
+; SSE2-NEXT: psubd %xmm4, %xmm2
+; SSE2-NEXT: psubd %xmm4, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: psrld $1, %xmm0
@@ -828,20 +833,20 @@ define void @avg_v16i16(<16 x i16>* %a,
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
@@ -888,80 +893,79 @@ define void @avg_v16i16(<16 x i16>* %a,
define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm11
-; SSE2-NEXT: movdqa 16(%rdi), %xmm10
-; SSE2-NEXT: movdqa 32(%rdi), %xmm9
-; SSE2-NEXT: movdqa 48(%rdi), %xmm4
-; SSE2-NEXT: movdqa (%rsi), %xmm8
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm11
+; SSE2-NEXT: movdqa 32(%rdi), %xmm10
+; SSE2-NEXT: movdqa 48(%rdi), %xmm8
+; SSE2-NEXT: movdqa (%rsi), %xmm9
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm10, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm10, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm13
+; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm8, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm4, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm15, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: paddd %xmm11, %xmm8
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd %xmm14, %xmm6
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm10, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm13, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm9, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: paddd %xmm12, %xmm4
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
-; SSE2-NEXT: psrld $1, %xmm8
+; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm7
+; SSE2-NEXT: psubd %xmm0, %xmm9
+; SSE2-NEXT: psubd %xmm0, %xmm6
+; SSE2-NEXT: psubd %xmm0, %xmm1
+; SSE2-NEXT: psubd %xmm0, %xmm5
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: psubd %xmm0, %xmm4
+; SSE2-NEXT: psubd %xmm0, %xmm3
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm9
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm8
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: packssdw %xmm7, %xmm8
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pslld $16, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: packssdw %xmm7, %xmm9
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -970,7 +974,7 @@ define void @avg_v32i16(<32 x i16>* %a,
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm8, (%rax)
+; SSE2-NEXT: movdqu %xmm9, (%rax)
; SSE2-NEXT: retq
;
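In the SSE2 hunk above, one pcmpeqd replaces the [1,1,1,1] load and feeds all eight psubd instructions, and since the all-ones value is created after the unpack/add chain rather than loaded up front, it is live across far fewer instructions. The pattern, reduced to the two instructions that matter:

    pcmpeqd %xmm0, %xmm0   # xmm0 = all-ones; a register always compares equal to itself
    psubd   %xmm0, %xmm7   # x - (-1) == x + 1, one shared constant for all the sums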
; AVX1-LABEL: avg_v32i16:
@@ -981,58 +985,58 @@ define void @avg_v32i16(<32 x i16>* %a,
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm9
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm9
+; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2],xmm6[3],xmm0[4],xmm6[5],xmm0[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm9, %xmm8
+; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm7, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm9
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm9, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3],xmm3[4],xmm6[5],xmm3[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm0[1],xmm7[2],xmm0[3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4],xmm0[5],xmm3[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: vmovups %ymm1, (%rax)
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm0[1],xmm4[2],xmm0[3],xmm4[4],xmm0[5],xmm4[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm0[1],xmm5[2],xmm0[3],xmm5[4],xmm0[5],xmm5[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm0[1],xmm6[2],xmm0[3],xmm6[4],xmm0[5],xmm6[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2],xmm0[3],xmm9[4],xmm0[5],xmm9[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vmovups %ymm1, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
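The size win from the commit message is visible here: each dropped [1,1,1,1] constant was 16 bytes of .rodata plus a RIP-relative load, while the replacement compare has no data-section footprint at all. Rough accounting (byte counts are approximate, based on typical VEX encodings; the label is hypothetical):

    vmovdqa .LCPI0_0(%rip), %xmm4   # ~8 bytes of code + 16 bytes of constant pool
    vpcmpeqd %xmm4, %xmm4, %xmm4    # ~4 bytes of code, no constant pool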
@@ -1043,18 +1047,18 @@ define void @avg_v32i16(<32 x i16>* %a,
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8
-; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT: vpsubd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
@@ -1080,12 +1084,12 @@ define void @avg_v32i16(<32 x i16>* %a,
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4
-; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm2
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512F-NEXT: vpsubd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsubd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1
; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, (%rax)
@@ -1197,90 +1201,89 @@ define void @avg_v16i8_2(<16 x i8>* %a,
define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-LABEL: avg_v32i8_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm9
-; SSE2-NEXT: movdqa 16(%rdi), %xmm12
-; SSE2-NEXT: movdqa (%rsi), %xmm4
+; SSE2-NEXT: movdqa (%rdi), %xmm3
+; SSE2-NEXT: movdqa 16(%rdi), %xmm8
+; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm9, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm9, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm12, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm10, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm12, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm15, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd %xmm11, %xmm3
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm14, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: paddd %xmm9, %xmm4
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: paddd %xmm13, %xmm8
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm10, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm12, %xmm1
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm7
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm0, %xmm7
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: packuswb %xmm7, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm4
-; SSE2-NEXT: packuswb %xmm5, %xmm4
-; SSE2-NEXT: packuswb %xmm3, %xmm4
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm8
-; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm8, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm8, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm6, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; SSE2-NEXT: paddd %xmm10, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: paddd %xmm8, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm9
+; SSE2-NEXT: psubd %xmm4, %xmm2
+; SSE2-NEXT: psubd %xmm4, %xmm5
+; SSE2-NEXT: psubd %xmm4, %xmm0
+; SSE2-NEXT: psubd %xmm4, %xmm6
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: psubd %xmm4, %xmm7
+; SSE2-NEXT: psubd %xmm4, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: psrld $1, %xmm3
; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm6, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: psrld $1, %xmm0
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm9
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm9, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm5, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm6, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm7, %xmm1
+; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm4, (%rax)
+; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
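All of these avg tests lower the same widened-average recipe, avg(a, b) = (a + b + 1) >> 1, and the only step this patch changes is how the +1 is applied. A skeleton of the SSE2 sequence, one lane group shown with illustrative register numbers:

    punpcklbw %xmm7, %xmm0   # zero-extend bytes to words (xmm7 = 0)
    punpcklwd %xmm7, %xmm0   # ...and words to dwords
    paddd     %xmm2, %xmm0   # a + b, no overflow at 32 bits
    pcmpeqd   %xmm4, %xmm4   # all-ones
    psubd     %xmm4, %xmm0   # the +1, the step this patch rewrites
    psrld     $1, %xmm0      # >> 1
    pand      %xmm5, %xmm0   # keep the low byte of each dword
    packuswb  %xmm1, %xmm0   # narrow back toward i8 (the real output packs twice)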
; AVX1-LABEL: avg_v32i8_2:
@@ -1291,57 +1294,57 @@ define void @avg_v32i8_2(<32 x i8>* %a,
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm9
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm9
+; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm2, %xmm6, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm9, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm9, %xmm8
+; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm7, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm9
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm0, %xmm7, %xmm7
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpand %xmm0, %xmm4, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm5, %xmm3
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm6, %xmm3
+; AVX1-NEXT: vpand %xmm0, %xmm9, %xmm0
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
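One more reason the diffs look larger than the change: the old code added the splat-1 into one operand before combining, while the new code sums the operands first and applies a single subtract-of-all-ones per vector at the end, so the schedule and register numbering shift even though the arithmetic is identical. In outline:

    # old ordering
    vpaddd %xmm6, %xmm7, %xmm7   # xmm6 = [1,1,1,1], folded into b first
    vpaddd %xmm7, %xmm0, %xmm0   # then combined with a
    # new ordering
    vpaddd %xmm7, %xmm0, %xmm0   # a + b first
    vpsubd %xmm4, %xmm0, %xmm0   # xmm4 = all-ones, subtracted once at the end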
@@ -1376,260 +1379,245 @@ define void @avg_v32i8_2(<32 x i8>* %a,
define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-LABEL: avg_v64i8_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rsi), %xmm2
-; SSE2-NEXT: movdqa 16(%rsi), %xmm7
-; SSE2-NEXT: movdqa 32(%rsi), %xmm15
+; SSE2-NEXT: movdqa (%rsi), %xmm14
+; SSE2-NEXT: movdqa 16(%rsi), %xmm12
+; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm14, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm12, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm6, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm12, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm2, %xmm10
; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm7, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm15, %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm4, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm15, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm8, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,1,1,1]
-; SSE2-NEXT: movdqa %xmm9, %xmm0
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: paddd %xmm9, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm6, %xmm9
-; SSE2-NEXT: paddd %xmm1, %xmm9
-; SSE2-NEXT: paddd %xmm6, %xmm9
-; SSE2-NEXT: movdqa %xmm10, %xmm11
-; SSE2-NEXT: paddd %xmm1, %xmm11
-; SSE2-NEXT: paddd %xmm10, %xmm11
-; SSE2-NEXT: movdqa %xmm2, %xmm10
-; SSE2-NEXT: paddd %xmm1, %xmm10
-; SSE2-NEXT: paddd %xmm2, %xmm10
-; SSE2-NEXT: movdqa %xmm12, %xmm0
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: paddd %xmm12, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: paddd %xmm1, %xmm6
-; SSE2-NEXT: paddd %xmm3, %xmm6
-; SSE2-NEXT: movdqa %xmm13, %xmm12
-; SSE2-NEXT: paddd %xmm1, %xmm12
-; SSE2-NEXT: paddd %xmm13, %xmm12
-; SSE2-NEXT: movdqa %xmm7, %xmm5
-; SSE2-NEXT: paddd %xmm1, %xmm5
-; SSE2-NEXT: paddd %xmm7, %xmm5
-; SSE2-NEXT: movdqa %xmm14, %xmm13
-; SSE2-NEXT: paddd %xmm1, %xmm13
-; SSE2-NEXT: paddd %xmm14, %xmm13
; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: paddd %xmm1, %xmm3
-; SSE2-NEXT: paddd %xmm4, %xmm3
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm0, %xmm14
-; SSE2-NEXT: paddd %xmm1, %xmm14
-; SSE2-NEXT: paddd %xmm0, %xmm14
-; SSE2-NEXT: movdqa %xmm15, %xmm4
-; SSE2-NEXT: paddd %xmm1, %xmm4
-; SSE2-NEXT: paddd %xmm15, %xmm4
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm0, %xmm15
-; SSE2-NEXT: paddd %xmm1, %xmm15
-; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: movdqa %xmm8, %xmm7
-; SSE2-NEXT: paddd %xmm1, %xmm7
-; SSE2-NEXT: paddd %xmm8, %xmm7
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm0, %xmm8
-; SSE2-NEXT: paddd %xmm1, %xmm8
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: psrld $1, %xmm9
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: psrld $1, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm9
-; SSE2-NEXT: packuswb %xmm0, %xmm9
-; SSE2-NEXT: psrld $1, %xmm10
-; SSE2-NEXT: psrld $1, %xmm11
-; SSE2-NEXT: pand %xmm2, %xmm11
-; SSE2-NEXT: pand %xmm2, %xmm10
-; SSE2-NEXT: packuswb %xmm11, %xmm10
-; SSE2-NEXT: packuswb %xmm9, %xmm10
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: psrld $1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm6
-; SSE2-NEXT: packuswb %xmm0, %xmm6
-; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: psrld $1, %xmm12
-; SSE2-NEXT: pand %xmm2, %xmm12
-; SSE2-NEXT: pand %xmm2, %xmm5
-; SSE2-NEXT: packuswb %xmm12, %xmm5
-; SSE2-NEXT: packuswb %xmm6, %xmm5
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm13
-; SSE2-NEXT: pand %xmm2, %xmm13
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: packuswb %xmm13, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm14
-; SSE2-NEXT: pand %xmm2, %xmm14
-; SSE2-NEXT: pand %xmm2, %xmm4
-; SSE2-NEXT: packuswb %xmm14, %xmm4
-; SSE2-NEXT: packuswb %xmm3, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm1, %xmm1
+; SSE2-NEXT: paddd %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm4, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm3, %xmm3
+; SSE2-NEXT: paddd %xmm2, %xmm2
+; SSE2-NEXT: paddd %xmm10, %xmm10
+; SSE2-NEXT: paddd %xmm5, %xmm5
+; SSE2-NEXT: paddd %xmm11, %xmm11
+; SSE2-NEXT: paddd %xmm12, %xmm12
+; SSE2-NEXT: paddd %xmm9, %xmm9
+; SSE2-NEXT: paddd %xmm6, %xmm6
+; SSE2-NEXT: paddd %xmm13, %xmm13
+; SSE2-NEXT: paddd %xmm14, %xmm14
+; SSE2-NEXT: paddd %xmm8, %xmm8
+; SSE2-NEXT: paddd %xmm7, %xmm7
+; SSE2-NEXT: paddd %xmm15, %xmm15
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm15
+; SSE2-NEXT: psubd %xmm0, %xmm7
+; SSE2-NEXT: psubd %xmm0, %xmm8
+; SSE2-NEXT: psubd %xmm0, %xmm14
+; SSE2-NEXT: psubd %xmm0, %xmm13
+; SSE2-NEXT: psubd %xmm0, %xmm6
+; SSE2-NEXT: psubd %xmm0, %xmm9
+; SSE2-NEXT: psubd %xmm0, %xmm12
+; SSE2-NEXT: psubd %xmm0, %xmm11
+; SSE2-NEXT: psubd %xmm0, %xmm5
+; SSE2-NEXT: psubd %xmm0, %xmm10
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: psubd %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: psubd %xmm0, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: psubd %xmm0, %xmm3
+; SSE2-NEXT: psubd %xmm0, %xmm1
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm15
-; SSE2-NEXT: pand %xmm2, %xmm15
-; SSE2-NEXT: pand %xmm2, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm0, %xmm15
+; SSE2-NEXT: pand %xmm0, %xmm7
; SSE2-NEXT: packuswb %xmm15, %xmm7
-; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm14
; SSE2-NEXT: psrld $1, %xmm8
-; SSE2-NEXT: pand %xmm2, %xmm8
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: packuswb %xmm8, %xmm1
-; SSE2-NEXT: packuswb %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm8
+; SSE2-NEXT: pand %xmm0, %xmm14
+; SSE2-NEXT: packuswb %xmm8, %xmm14
+; SSE2-NEXT: packuswb %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: pand %xmm0, %xmm13
+; SSE2-NEXT: pand %xmm0, %xmm6
+; SSE2-NEXT: packuswb %xmm13, %xmm6
+; SSE2-NEXT: psrld $1, %xmm12
+; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: pand %xmm0, %xmm9
+; SSE2-NEXT: pand %xmm0, %xmm12
+; SSE2-NEXT: packuswb %xmm9, %xmm12
+; SSE2-NEXT: packuswb %xmm6, %xmm12
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm11
+; SSE2-NEXT: pand %xmm0, %xmm11
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: packuswb %xmm11, %xmm5
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm10
+; SSE2-NEXT: pand %xmm0, %xmm10
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm10, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: packuswb %xmm5, %xmm4
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm5, %xmm1
+; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm4, (%rax)
-; SSE2-NEXT: movdqu %xmm5, (%rax)
-; SSE2-NEXT: movdqu %xmm10, (%rax)
+; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm12, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v64i8_2:
; AVX1: # BB#0:
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm6, %xmm0, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX1-NEXT: vpaddd %xmm6, %xmm1, %xmm0
-; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpaddd %xmm6, %xmm2, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddd %xmm6, %xmm4, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vmovdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX1-NEXT: vpaddd %xmm6, %xmm5, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpaddd %xmm6, %xmm14, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm14, %xmm14
-; AVX1-NEXT: vpaddd %xmm6, %xmm8, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm8, %xmm5
-; AVX1-NEXT: vpaddd %xmm6, %xmm11, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm11, %xmm3
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpaddd %xmm7, %xmm7, %xmm7
+; AVX1-NEXT: vmovdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpaddd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT: vmovdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpaddd %xmm5, %xmm5, %xmm6
+; AVX1-NEXT: vpaddd %xmm4, %xmm4, %xmm5
+; AVX1-NEXT: vpaddd %xmm3, %xmm3, %xmm4
+; AVX1-NEXT: vpaddd %xmm2, %xmm2, %xmm3
+; AVX1-NEXT: vpaddd %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm1
+; AVX1-NEXT: vpaddd %xmm15, %xmm15, %xmm15
+; AVX1-NEXT: vpaddd %xmm14, %xmm14, %xmm14
+; AVX1-NEXT: vpaddd %xmm13, %xmm13, %xmm13
+; AVX1-NEXT: vpaddd %xmm12, %xmm12, %xmm12
+; AVX1-NEXT: vpaddd %xmm11, %xmm11, %xmm11
+; AVX1-NEXT: vpaddd %xmm10, %xmm10, %xmm10
+; AVX1-NEXT: vpaddd %xmm9, %xmm9, %xmm9
+; AVX1-NEXT: vpaddd %xmm8, %xmm8, %xmm8
+; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm8, %xmm7
+; AVX1-NEXT: vmovdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsubd %xmm0, %xmm9, %xmm8
+; AVX1-NEXT: vpsubd %xmm0, %xmm10, %xmm10
+; AVX1-NEXT: vpsubd %xmm0, %xmm11, %xmm9
+; AVX1-NEXT: vpsubd %xmm0, %xmm12, %xmm7
+; AVX1-NEXT: vmovdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsubd %xmm0, %xmm13, %xmm11
+; AVX1-NEXT: vpsubd %xmm0, %xmm14, %xmm13
+; AVX1-NEXT: vpsubd %xmm0, %xmm15, %xmm12
+; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm15
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm2
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm14
+; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm3
; AVX1-NEXT: vmovdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; AVX1-NEXT: vpaddd %xmm6, %xmm12, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm12, %xmm8
-; AVX1-NEXT: vpaddd %xmm6, %xmm15, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm15, %xmm11
-; AVX1-NEXT: vpaddd %xmm6, %xmm13, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm13, %xmm13
-; AVX1-NEXT: vpaddd %xmm6, %xmm9, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm9, %xmm12
-; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm4
-; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm15
-; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm6
-; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm6
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm5
; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm6
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm8
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm8, %xmm6, %xmm8
+; AVX1-NEXT: vpsrld $1, %xmm9, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm10, %xmm4
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpackuswb %xmm8, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm11, %xmm6
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm12, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm13, %xmm0
; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpackuswb %xmm0, %xmm6, %xmm0
; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vpsrld $1, %xmm15, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm14, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrld $1, %xmm8, %xmm1
-; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm14, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm13, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm11, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm12, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm2
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm6, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm15, %xmm4
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
@@ -1650,23 +1638,23 @@ define void @avg_v64i8_2(<64 x i8>* %a,
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8
-; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm9
-; AVX2-NEXT: vpaddd %ymm9, %ymm0, %ymm9
-; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm10
-; AVX2-NEXT: vpaddd %ymm8, %ymm2, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm8, %ymm3, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm4, %ymm4
-; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm5
-; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm6, %ymm1
-; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm7, %ymm0
+; AVX2-NEXT: vpaddd %ymm7, %ymm7, %ymm7
+; AVX2-NEXT: vpaddd %ymm6, %ymm6, %ymm6
+; AVX2-NEXT: vpaddd %ymm5, %ymm5, %ymm5
+; AVX2-NEXT: vpaddd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT: vpaddd %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8
+; AVX2-NEXT: vpsubd %ymm8, %ymm0, %ymm9
+; AVX2-NEXT: vpsubd %ymm8, %ymm1, %ymm10
+; AVX2-NEXT: vpsubd %ymm8, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm8, %ymm3, %ymm3
+; AVX2-NEXT: vpsubd %ymm8, %ymm4, %ymm4
+; AVX2-NEXT: vpsubd %ymm8, %ymm5, %ymm5
+; AVX2-NEXT: vpsubd %ymm8, %ymm6, %ymm1
+; AVX2-NEXT: vpsubd %ymm8, %ymm7, %ymm0
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm11
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm12
; AVX2-NEXT: vpsrld $1, %ymm5, %ymm5
@@ -1718,15 +1706,15 @@ define void @avg_v64i8_2(<64 x i8>* %a,
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4
-; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm5
-; AVX512F-NEXT: vpaddd %zmm5, %zmm0, %zmm0
-; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm5
-; AVX512F-NEXT: vpaddd %zmm5, %zmm1, %zmm1
-; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm5
-; AVX512F-NEXT: vpaddd %zmm5, %zmm2, %zmm2
-; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm4
-; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3
+; AVX512F-NEXT: vpaddd %zmm3, %zmm3, %zmm3
+; AVX512F-NEXT: vpaddd %zmm2, %zmm2, %zmm2
+; AVX512F-NEXT: vpaddd %zmm1, %zmm1, %zmm1
+; AVX512F-NEXT: vpaddd %zmm0, %zmm0, %zmm0
+; AVX512F-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512F-NEXT: vpsubd %zmm4, %zmm0, %zmm0
+; AVX512F-NEXT: vpsubd %zmm4, %zmm1, %zmm1
+; AVX512F-NEXT: vpsubd %zmm4, %zmm2, %zmm2
+; AVX512F-NEXT: vpsubd %zmm4, %zmm3, %zmm3
; AVX512F-NEXT: vpsrld $1, %zmm3, %zmm3
; AVX512F-NEXT: vpsrld $1, %zmm2, %zmm2
; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1
@@ -1819,32 +1807,32 @@ define void @avg_v8i16_2(<8 x i16>* %a,
define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm5
+; SSE2-NEXT: movdqa (%rdi), %xmm2
; SSE2-NEXT: movdqa 16(%rdi), %xmm4
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: movdqa %xmm5, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [1,1,1,1]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: paddd %xmm7, %xmm3
-; SSE2-NEXT: paddd %xmm6, %xmm0
-; SSE2-NEXT: paddd %xmm5, %xmm0
-; SSE2-NEXT: paddd %xmm6, %xmm2
-; SSE2-NEXT: paddd %xmm8, %xmm2
-; SSE2-NEXT: paddd %xmm6, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: psubd %xmm4, %xmm0
+; SSE2-NEXT: psubd %xmm4, %xmm2
+; SSE2-NEXT: psubd %xmm4, %xmm1
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: psrld $1, %xmm2
; SSE2-NEXT: psrld $1, %xmm0
@@ -1868,20 +1856,20 @@ define void @avg_v16i16_2(<16 x i16>* %a
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpaddd %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm4
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
@@ -1928,80 +1916,79 @@ define void @avg_v16i16_2(<16 x i16>* %a
define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm11
-; SSE2-NEXT: movdqa 16(%rdi), %xmm10
-; SSE2-NEXT: movdqa 32(%rdi), %xmm9
-; SSE2-NEXT: movdqa 48(%rdi), %xmm4
-; SSE2-NEXT: movdqa (%rsi), %xmm8
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm11
+; SSE2-NEXT: movdqa 32(%rdi), %xmm10
+; SSE2-NEXT: movdqa 48(%rdi), %xmm8
+; SSE2-NEXT: movdqa (%rsi), %xmm9
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm11, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm10, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm10, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm13
+; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm8, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm4, %xmm9
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm15, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: paddd %xmm11, %xmm8
-; SSE2-NEXT: paddd %xmm0, %xmm6
-; SSE2-NEXT: paddd %xmm14, %xmm6
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm10, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm13, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm9, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm4
-; SSE2-NEXT: paddd %xmm12, %xmm4
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
-; SSE2-NEXT: psrld $1, %xmm8
+; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: psubd %xmm0, %xmm7
+; SSE2-NEXT: psubd %xmm0, %xmm9
+; SSE2-NEXT: psubd %xmm0, %xmm6
+; SSE2-NEXT: psubd %xmm0, %xmm1
+; SSE2-NEXT: psubd %xmm0, %xmm5
+; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: psubd %xmm0, %xmm4
+; SSE2-NEXT: psubd %xmm0, %xmm3
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm9
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm8
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: packssdw %xmm7, %xmm8
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pslld $16, %xmm9
+; SSE2-NEXT: psrad $16, %xmm9
+; SSE2-NEXT: packssdw %xmm7, %xmm9
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -2010,7 +1997,7 @@ define void @avg_v32i16_2(<32 x i16>* %a
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm8, (%rax)
+; SSE2-NEXT: movdqu %xmm9, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v32i16_2:
@@ -2021,58 +2008,58 @@ define void @avg_v32i16_2(<32 x i16>* %a
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1]
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm7, %xmm0, %xmm9
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm9
+; AVX1-NEXT: vpaddd %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpaddd %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpaddd %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-NEXT: vpaddd %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm6[1],xmm0[2],xmm6[3],xmm0[4],xmm6[5],xmm0[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm9, %xmm8
+; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vpsubd %xmm0, %xmm7, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm9
+; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
+; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm9, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2],xmm6[3],xmm1[4],xmm6[5],xmm1[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3],xmm3[4],xmm6[5],xmm3[6],xmm6[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2],xmm6[3],xmm2[4],xmm6[5],xmm2[6],xmm6[7]
-; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
+; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm0[1],xmm7[2],xmm0[3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4],xmm0[5],xmm3[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: vmovups %ymm1, (%rax)
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm0[1],xmm4[2],xmm0[3],xmm4[4],xmm0[5],xmm4[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm0[1],xmm5[2],xmm0[3],xmm5[4],xmm0[5],xmm5[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm0[1],xmm6[2],xmm0[3],xmm6[4],xmm0[5],xmm6[6],xmm0[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2],xmm0[3],xmm9[4],xmm0[5],xmm9[6],xmm0[7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vmovups %ymm1, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2083,18 +2070,18 @@ define void @avg_v32i16_2(<32 x i16>* %a
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8
-; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm4
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
+; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT: vpsubd %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
@@ -2120,12 +2107,12 @@ define void @avg_v32i16_2(<32 x i16>* %a
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4
-; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm2
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512F-NEXT: vpsubd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpsubd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1
; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, (%rax)
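Note on the avg.ll hunks above: they all come from the same scalar recipe. The tests widen the elements to i32, form a sum plus a rounding 1, then shift right by 1. A minimal sketch of that core in IR follows; the function name is illustrative and not taken from the test file:

define <4 x i32> @avg_round_core(<4 x i32> %a, <4 x i32> %b) {
  %sum = add <4 x i32> %a, %b
  %rnd = add <4 x i32> %sum, <i32 1, i32 1, i32 1, i32 1>
  %avg = lshr <4 x i32> %rnd, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %avg
}

The add-by-splat-1 step (%rnd) is the part this patch rewrites, which is why the checks above trade the [1,1,1,1] constant-pool operand for pcmpeqd + psubd, and for vpternlogd + vpsubd on zmm registers.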
Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll Mon Jun 26 07:19:26 2017
@@ -388,7 +388,8 @@ define void @test_x86_sse2_storeu_dq(i8*
; CHECK-LABEL: test_x86_sse2_storeu_dq:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vpaddb LCPI34_0, %xmm0, %xmm0
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovdqu %xmm0, (%eax)
; CHECK-NEXT: retl
%a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -434,9 +435,9 @@ define void @test_x86_avx_storeu_dq_256(
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; CHECK-NEXT: vpaddb %xmm2, %xmm1, %xmm1
-; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; CHECK-NEXT: vpsubb %xmm2, %xmm0, %xmm0
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vmovups %ymm0, (%eax)
; CHECK-NEXT: vzeroupper
Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll Mon Jun 26 07:19:26 2017
@@ -930,8 +930,8 @@ define void @movnt_dq(i8* %p, <2 x i64>
; AVX-LABEL: movnt_dq:
; AVX: ## BB#0:
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX-NEXT: vpaddq LCPI65_0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
-; AVX-NEXT: ## fixup A - offset: 4, value: LCPI65_0, kind: FK_Data_4
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
+; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfb,0xc1]
; AVX-NEXT: vmovntdq %ymm0, (%eax) ## encoding: [0xc5,0xfd,0xe7,0x00]
; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX-NEXT: retl ## encoding: [0xc3]
@@ -939,8 +939,8 @@ define void @movnt_dq(i8* %p, <2 x i64>
; AVX512VL-LABEL: movnt_dq:
; AVX512VL: ## BB#0:
; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT: vpaddq LCPI65_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A]
-; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI65_0, kind: FK_Data_4
+; AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
+; AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
; AVX512VL-NEXT: vmovntdq %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00]
; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT: retl ## encoding: [0xc3]
Modified: llvm/trunk/test/CodeGen/X86/avx-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-logic.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-logic.ll Mon Jun 26 07:19:26 2017
@@ -247,7 +247,8 @@ entry:
define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpandn:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpsubq %xmm1, %xmm0, %xmm1
; CHECK-NEXT: vpandn %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
entry:
@@ -261,7 +262,8 @@ entry:
define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp {
; CHECK-LABEL: vpand:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
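The avx-logic.ll tests above reduce to a plain vector increment feeding a bitwise op. A standalone reproducer of just the increment, with an illustrative function name:

define <2 x i64> @inc_v2i64(<2 x i64> %x) {
  %r = add <2 x i64> %x, <i64 1, i64 1>
  ret <2 x i64> %r
}

As the CHECK lines show, AVX now materializes all-ones with vpcmpeqd (x == x holds in every lane) and computes x - (-1) with vpsubq, rather than feeding vpaddq from a constant-pool load.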
Modified: llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll Mon Jun 26 07:19:26 2017
@@ -97,14 +97,16 @@ define <32 x i8> @shuffle_v32i8_2323_dom
; AVX1-LABEL: shuffle_v32i8_2323_domain:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v32i8_2323_domain:
; AVX2: ## BB#0: ## %entry
-; AVX2-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX2-NEXT: retq
entry:
@@ -127,14 +129,15 @@ entry:
define <4 x i64> @shuffle_v4i64_6701_domain(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v4i64_6701_domain:
; AVX1: ## BB#0: ## %entry
-; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v4i64_6701_domain:
; AVX2: ## BB#0: ## %entry
-; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; AVX2-NEXT: retq
entry:
@@ -148,15 +151,16 @@ define <8 x i32> @shuffle_v8i32_u5u7cdef
; AVX1-LABEL: shuffle_v8i32_u5u7cdef:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v8i32_u5u7cdef:
; AVX2: ## BB#0: ## %entry
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: retq
entry:
@@ -169,13 +173,15 @@ entry:
define <16 x i16> @shuffle_v16i16_4501(<16 x i16> %a, <16 x i16> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: shuffle_v16i16_4501:
; AVX1: ## BB#0: ## %entry
-; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_4501:
; AVX2: ## BB#0: ## %entry
-; AVX2-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsubw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
entry:
@@ -189,14 +195,16 @@ define <16 x i16> @shuffle_v16i16_4501_m
; AVX1-LABEL: shuffle_v16i16_4501_mem:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
-; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1],ymm0[0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_4501_mem:
; AVX2: ## BB#0: ## %entry
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = mem[0,1],ymm0[0,1]
; AVX2-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll Mon Jun 26 07:19:26 2017
@@ -382,7 +382,8 @@ define void @test_x86_avx_storeu_dq_256(
; CHECK-LABEL: test_x86_avx_storeu_dq_256:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vpaddb LCPI34_0, %ymm0, %ymm0
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vmovdqu %ymm0, (%eax)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl
Modified: llvm/trunk/test/CodeGen/X86/avx2-logic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-logic.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-logic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-logic.ll Mon Jun 26 07:19:26 2017
@@ -5,14 +5,15 @@
define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpandn:
; X32: ## BB#0: ## %entry
-; X32-NEXT: vpaddq LCPI0_0, %ymm0, %ymm1
+; X32-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X32-NEXT: vpandn %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpandn:
; X64: ## BB#0: ## %entry
-; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
-; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm1
+; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X64-NEXT: vpandn %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -26,14 +27,15 @@ entry:
define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpand:
; X32: ## BB#0: ## %entry
-; X32-NEXT: vpaddq LCPI1_0, %ymm0, %ymm0
+; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpand %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpand:
; X64: ## BB#0: ## %entry
-; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpand %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -46,14 +48,15 @@ entry:
define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpor:
; X32: ## BB#0: ## %entry
-; X32-NEXT: vpaddq LCPI2_0, %ymm0, %ymm0
+; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpor:
; X64: ## BB#0: ## %entry
-; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -66,14 +69,15 @@ entry:
define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpxor:
; X32: ## BB#0: ## %entry
-; X32-NEXT: vpaddq LCPI3_0, %ymm0, %ymm0
+; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpxor:
; X64: ## BB#0: ## %entry
-; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
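The X64 hunks in avx2-logic.ll show the 256-bit payoff most directly: the old sequence needed vpbroadcastq from the constant pool, while the new one stays entirely in registers. An illustrative reproducer:

define <4 x i64> @inc_v4i64(<4 x i64> %x) {
  %r = add <4 x i64> %x, <i64 1, i64 1, i64 1, i64 1>
  ret <4 x i64> %r
}

Per the diffs above, this now lowers to vpcmpeqd %ymm2, %ymm2, %ymm2 plus vpsubq %ymm2, %ymm0, %ymm0, with no memory access.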
Modified: llvm/trunk/test/CodeGen/X86/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/select.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/select.ll Mon Jun 26 07:19:26 2017
@@ -321,8 +321,9 @@ define void @test8(i1 %c, <6 x i32>* %ds
; GENERIC-NEXT: LBB7_6:
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm1
-; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm0
+; GENERIC-NEXT: pcmpeqd %xmm2, %xmm2
+; GENERIC-NEXT: paddd %xmm2, %xmm1
+; GENERIC-NEXT: paddd %xmm2, %xmm0
; GENERIC-NEXT: movq %xmm0, 16(%rsi)
; GENERIC-NEXT: movdqa %xmm1, (%rsi)
; GENERIC-NEXT: retq
@@ -361,8 +362,9 @@ define void @test8(i1 %c, <6 x i32>* %ds
; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; ATOM-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; ATOM-NEXT: LBB7_6:
-; ATOM-NEXT: psubd {{.*}}(%rip), %xmm0
-; ATOM-NEXT: psubd {{.*}}(%rip), %xmm1
+; ATOM-NEXT: pcmpeqd %xmm2, %xmm2
+; ATOM-NEXT: paddd %xmm2, %xmm0
+; ATOM-NEXT: paddd %xmm2, %xmm1
; ATOM-NEXT: movq %xmm0, 16(%rsi)
; ATOM-NEXT: movdqa %xmm1, (%rsi)
; ATOM-NEXT: retq
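The select.ll hunks exercise the other direction of the transform: a psubd of a splat-1 constant becomes a paddd of an all-ones register. A minimal reproducer, with an illustrative name:

define <4 x i32> @dec_v4i32(<4 x i32> %x) {
  %r = sub <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %r
}

As the GENERIC and ATOM checks show, this now lowers to pcmpeqd %xmm2, %xmm2 followed by paddd, since x + (-1) == x - 1.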
Modified: llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll Mon Jun 26 07:19:26 2017
@@ -83,7 +83,8 @@ define void @test_x86_sse2_storeu_dq(i8*
; CHECK-LABEL: test_x86_sse2_storeu_dq:
; CHECK: ## BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: paddb LCPI7_0, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
+; CHECK-NEXT: psubb %xmm1, %xmm0
; CHECK-NEXT: movdqu %xmm0, (%eax)
; CHECK-NEXT: retl
%a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
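One detail in the i8 test above: the constant is built with a dword compare (pcmpeqd) even though the arithmetic is byte-wide (psubb). That is sound because x == x sets every bit of the register, and an all-ones 128-bit value reads as splat -1 at any element width. Illustrative IR showing that the two views are the same bits:

@ones_i32 = constant <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@ones_i8  = constant <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>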
Modified: llvm/trunk/test/CodeGen/X86/vec_ctbits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_ctbits.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_ctbits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_ctbits.ll Mon Jun 26 07:19:26 2017
@@ -12,20 +12,21 @@ define <2 x i64> @footz(<2 x i64> %a) no
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: psubq %xmm0, %xmm2
; CHECK-NEXT: pand %xmm0, %xmm2
-; CHECK-NEXT: psubq {{.*}}(%rip), %xmm2
-; CHECK-NEXT: movdqa %xmm2, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm3, %xmm3
+; CHECK-NEXT: paddq %xmm2, %xmm3
+; CHECK-NEXT: movdqa %xmm3, %xmm0
; CHECK-NEXT: psrlq $1, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
-; CHECK-NEXT: psubq %xmm0, %xmm2
+; CHECK-NEXT: psubq %xmm0, %xmm3
; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323]
-; CHECK-NEXT: movdqa %xmm2, %xmm3
-; CHECK-NEXT: pand %xmm0, %xmm3
-; CHECK-NEXT: psrlq $2, %xmm2
+; CHECK-NEXT: movdqa %xmm3, %xmm2
; CHECK-NEXT: pand %xmm0, %xmm2
-; CHECK-NEXT: paddq %xmm3, %xmm2
-; CHECK-NEXT: movdqa %xmm2, %xmm0
+; CHECK-NEXT: psrlq $2, %xmm3
+; CHECK-NEXT: pand %xmm0, %xmm3
+; CHECK-NEXT: paddq %xmm2, %xmm3
+; CHECK-NEXT: movdqa %xmm3, %xmm0
; CHECK-NEXT: psrlq $4, %xmm0
-; CHECK-NEXT: paddq %xmm2, %xmm0
+; CHECK-NEXT: paddq %xmm3, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: psadbw %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -115,20 +116,21 @@ define <2 x i32> @promtz(<2 x i32> %a) n
; CHECK-NEXT: pxor %xmm2, %xmm2
; CHECK-NEXT: psubq %xmm0, %xmm2
; CHECK-NEXT: pand %xmm0, %xmm2
-; CHECK-NEXT: psubq {{.*}}(%rip), %xmm2
-; CHECK-NEXT: movdqa %xmm2, %xmm0
+; CHECK-NEXT: pcmpeqd %xmm3, %xmm3
+; CHECK-NEXT: paddq %xmm2, %xmm3
+; CHECK-NEXT: movdqa %xmm3, %xmm0
; CHECK-NEXT: psrlq $1, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
-; CHECK-NEXT: psubq %xmm0, %xmm2
+; CHECK-NEXT: psubq %xmm0, %xmm3
; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323]
-; CHECK-NEXT: movdqa %xmm2, %xmm3
-; CHECK-NEXT: pand %xmm0, %xmm3
-; CHECK-NEXT: psrlq $2, %xmm2
+; CHECK-NEXT: movdqa %xmm3, %xmm2
; CHECK-NEXT: pand %xmm0, %xmm2
-; CHECK-NEXT: paddq %xmm3, %xmm2
-; CHECK-NEXT: movdqa %xmm2, %xmm0
+; CHECK-NEXT: psrlq $2, %xmm3
+; CHECK-NEXT: pand %xmm0, %xmm3
+; CHECK-NEXT: paddq %xmm2, %xmm3
+; CHECK-NEXT: movdqa %xmm3, %xmm0
; CHECK-NEXT: psrlq $4, %xmm0
-; CHECK-NEXT: paddq %xmm2, %xmm0
+; CHECK-NEXT: paddq %xmm3, %xmm0
; CHECK-NEXT: pand {{.*}}(%rip), %xmm0
; CHECK-NEXT: psadbw %xmm1, %xmm0
; CHECK-NEXT: retq
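(The cttz tests in this file and in the vector-tzcnt files below all change for the same reason: the expansion computes trailing zeros as popcount((x & -x) - 1), and the "- 1" step is a vector decrement, which this patch now emits as an add of all-ones. A self-contained sketch of that identity in IR -- the in-tree expansion is produced during legalization, not written as IR in the tests:

  ; cttz(x) == ctpop((x & -x) + (-1)); the decrement is what this patch retargets
  define <2 x i64> @cttz_expansion(<2 x i64> %x) {
    %neg = sub <2 x i64> zeroinitializer, %x        ; -x
    %lsb = and <2 x i64> %x, %neg                   ; isolate lowest set bit: 2^cttz(x)
    %dec = add <2 x i64> %lsb, <i64 -1, i64 -1>     ; was: sub %lsb, <i64 1, i64 1>
    %cnt = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %dec)
    ret <2 x i64> %cnt
  }
  declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)

In the asm diffs that is visible as pxor/psubq/pand (negate and mask), the changed psubq-of-splat(1) becoming paddq-of-all-ones, and then the bit-parallel popcount ladder ending in psadbw.)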
Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll Mon Jun 26 07:19:26 2017
@@ -19,20 +19,21 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubq %xmm0, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: paddq %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrlq $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: psubq %xmm0, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: psrlq $2, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: paddq %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrlq $2, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: paddq %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrlq $4, %xmm0
-; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: paddq %xmm3, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: retq
@@ -43,20 +44,21 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubq %xmm0, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE3-NEXT: paddq %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrlq $1, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubq %xmm0, %xmm2
+; SSE3-NEXT: psubq %xmm0, %xmm3
; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323]
-; SSE3-NEXT: movdqa %xmm2, %xmm3
-; SSE3-NEXT: pand %xmm0, %xmm3
-; SSE3-NEXT: psrlq $2, %xmm2
+; SSE3-NEXT: movdqa %xmm3, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: paddq %xmm3, %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: psrlq $2, %xmm3
+; SSE3-NEXT: pand %xmm0, %xmm3
+; SSE3-NEXT: paddq %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrlq $4, %xmm0
-; SSE3-NEXT: paddq %xmm2, %xmm0
+; SSE3-NEXT: paddq %xmm3, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: retq
@@ -67,16 +69,17 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubq %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
-; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: paddq %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pand %xmm2, %xmm4
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: pshufb %xmm4, %xmm5
-; SSSE3-NEXT: psrlw $4, %xmm2
-; SSSE3-NEXT: pand %xmm3, %xmm2
-; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: psrlw $4, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pshufb %xmm3, %xmm0
; SSSE3-NEXT: paddb %xmm5, %xmm0
; SSSE3-NEXT: psadbw %xmm1, %xmm0
; SSSE3-NEXT: retq
@@ -87,16 +90,17 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubq %xmm0, %xmm2
; SSE41-NEXT: pand %xmm0, %xmm2
-; SSE41-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE41-NEXT: paddq %xmm2, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pand %xmm2, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pshufb %xmm4, %xmm5
-; SSE41-NEXT: psrlw $4, %xmm2
-; SSE41-NEXT: pand %xmm3, %xmm2
-; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: psrlw $4, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: pshufb %xmm3, %xmm0
; SSE41-NEXT: paddb %xmm5, %xmm0
; SSE41-NEXT: psadbw %xmm1, %xmm0
; SSE41-NEXT: retq
@@ -106,7 +110,8 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -123,7 +128,8 @@ define <2 x i64> @testv2i64(<2 x i64> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
@@ -159,20 +165,21 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubq %xmm0, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: paddq %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrlq $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubq %xmm0, %xmm2
+; SSE2-NEXT: psubq %xmm0, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: psrlq $2, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: paddq %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrlq $2, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: paddq %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrlq $4, %xmm0
-; SSE2-NEXT: paddq %xmm2, %xmm0
+; SSE2-NEXT: paddq %xmm3, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: psadbw %xmm1, %xmm0
; SSE2-NEXT: retq
@@ -183,20 +190,21 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubq %xmm0, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE3-NEXT: paddq %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrlq $1, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubq %xmm0, %xmm2
+; SSE3-NEXT: psubq %xmm0, %xmm3
; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323]
-; SSE3-NEXT: movdqa %xmm2, %xmm3
-; SSE3-NEXT: pand %xmm0, %xmm3
-; SSE3-NEXT: psrlq $2, %xmm2
+; SSE3-NEXT: movdqa %xmm3, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: paddq %xmm3, %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: psrlq $2, %xmm3
+; SSE3-NEXT: pand %xmm0, %xmm3
+; SSE3-NEXT: paddq %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrlq $4, %xmm0
-; SSE3-NEXT: paddq %xmm2, %xmm0
+; SSE3-NEXT: paddq %xmm3, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE3-NEXT: psadbw %xmm1, %xmm0
; SSE3-NEXT: retq
@@ -207,16 +215,17 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubq %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
-; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: paddq %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pand %xmm2, %xmm4
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: pshufb %xmm4, %xmm5
-; SSSE3-NEXT: psrlw $4, %xmm2
-; SSSE3-NEXT: pand %xmm3, %xmm2
-; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: psrlw $4, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pshufb %xmm3, %xmm0
; SSSE3-NEXT: paddb %xmm5, %xmm0
; SSSE3-NEXT: psadbw %xmm1, %xmm0
; SSSE3-NEXT: retq
@@ -227,16 +236,17 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubq %xmm0, %xmm2
; SSE41-NEXT: pand %xmm0, %xmm2
-; SSE41-NEXT: psubq {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE41-NEXT: paddq %xmm2, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm3, %xmm4
+; SSE41-NEXT: pand %xmm2, %xmm4
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: pshufb %xmm4, %xmm5
-; SSE41-NEXT: psrlw $4, %xmm2
-; SSE41-NEXT: pand %xmm3, %xmm2
-; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: psrlw $4, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: pshufb %xmm3, %xmm0
; SSE41-NEXT: paddb %xmm5, %xmm0
; SSE41-NEXT: psadbw %xmm1, %xmm0
; SSE41-NEXT: retq
@@ -246,7 +256,8 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -263,7 +274,8 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -301,7 +313,8 @@ define <2 x i64> @testv2i64u(<2 x i64> %
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
@@ -337,20 +350,21 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: psubd %xmm0, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: psrld $2, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrld $2, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrld $4, %xmm0
-; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -366,20 +380,21 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubd %xmm0, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE3-NEXT: paddd %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrld $1, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubd %xmm0, %xmm2
+; SSE3-NEXT: psubd %xmm0, %xmm3
; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
-; SSE3-NEXT: movdqa %xmm2, %xmm3
-; SSE3-NEXT: pand %xmm0, %xmm3
-; SSE3-NEXT: psrld $2, %xmm2
+; SSE3-NEXT: movdqa %xmm3, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: paddd %xmm3, %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: psrld $2, %xmm3
+; SSE3-NEXT: pand %xmm0, %xmm3
+; SSE3-NEXT: paddd %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrld $4, %xmm0
-; SSE3-NEXT: paddd %xmm2, %xmm0
+; SSE3-NEXT: paddd %xmm3, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE3-NEXT: movdqa %xmm0, %xmm2
; SSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -395,16 +410,17 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubd %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
-; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: paddd %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pand %xmm2, %xmm4
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: pshufb %xmm4, %xmm5
-; SSSE3-NEXT: psrlw $4, %xmm2
-; SSSE3-NEXT: pand %xmm3, %xmm2
-; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: psrlw $4, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pshufb %xmm3, %xmm0
; SSSE3-NEXT: paddb %xmm5, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -420,16 +436,17 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubd %xmm0, %xmm2
; SSE41-NEXT: pand %xmm0, %xmm2
-; SSE41-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pand %xmm0, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE41-NEXT: paddd %xmm2, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: pshufb %xmm3, %xmm5
-; SSE41-NEXT: psrlw $4, %xmm2
-; SSE41-NEXT: pand %xmm0, %xmm2
-; SSE41-NEXT: pshufb %xmm2, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm4
; SSE41-NEXT: paddb %xmm5, %xmm4
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
@@ -443,7 +460,8 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -464,8 +482,8 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
-; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -486,7 +504,8 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX512CDVL-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX512CDVL-NEXT: vpsubd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512CDVL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512CDVL-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDVL-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -507,8 +526,8 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512CD-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX512CD-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
-; AVX512CD-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX512CD-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512CD-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX512CD-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -529,8 +548,8 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
@@ -542,16 +561,17 @@ define <4 x i32> @testv4i32(<4 x i32> %i
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: psubd %xmm0, %xmm2
; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: psubd {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pand %xmm0, %xmm3
+; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; X32-SSE-NEXT: paddd %xmm2, %xmm0
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm3
+; X32-SSE-NEXT: pand %xmm2, %xmm3
; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X32-SSE-NEXT: movdqa %xmm4, %xmm5
; X32-SSE-NEXT: pshufb %xmm3, %xmm5
-; X32-SSE-NEXT: psrlw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: pshufb %xmm2, %xmm4
+; X32-SSE-NEXT: psrlw $4, %xmm0
+; X32-SSE-NEXT: pand %xmm2, %xmm0
+; X32-SSE-NEXT: pshufb %xmm0, %xmm4
; X32-SSE-NEXT: paddb %xmm5, %xmm4
; X32-SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
@@ -570,20 +590,21 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: psubd %xmm0, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrld $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubd %xmm0, %xmm2
+; SSE2-NEXT: psubd %xmm0, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: psrld $2, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrld $2, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
; SSE2-NEXT: psrld $4, %xmm0
-; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -599,20 +620,21 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSE3-NEXT: pxor %xmm2, %xmm2
; SSE3-NEXT: psubd %xmm0, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE3-NEXT: paddd %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrld $1, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubd %xmm0, %xmm2
+; SSE3-NEXT: psubd %xmm0, %xmm3
; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459]
-; SSE3-NEXT: movdqa %xmm2, %xmm3
-; SSE3-NEXT: pand %xmm0, %xmm3
-; SSE3-NEXT: psrld $2, %xmm2
+; SSE3-NEXT: movdqa %xmm3, %xmm2
; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: paddd %xmm3, %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE3-NEXT: psrld $2, %xmm3
+; SSE3-NEXT: pand %xmm0, %xmm3
+; SSE3-NEXT: paddd %xmm2, %xmm3
+; SSE3-NEXT: movdqa %xmm3, %xmm0
; SSE3-NEXT: psrld $4, %xmm0
-; SSE3-NEXT: paddd %xmm2, %xmm0
+; SSE3-NEXT: paddd %xmm3, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE3-NEXT: movdqa %xmm0, %xmm2
; SSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -628,16 +650,17 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: psubd %xmm0, %xmm2
; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm2, %xmm4
-; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: paddd %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pand %xmm2, %xmm4
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: pshufb %xmm4, %xmm5
-; SSSE3-NEXT: psrlw $4, %xmm2
-; SSSE3-NEXT: pand %xmm3, %xmm2
-; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: psrlw $4, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pshufb %xmm3, %xmm0
; SSSE3-NEXT: paddb %xmm5, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
@@ -653,16 +676,17 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: psubd %xmm0, %xmm2
; SSE41-NEXT: pand %xmm0, %xmm2
-; SSE41-NEXT: psubd {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: pand %xmm0, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE41-NEXT: paddd %xmm2, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: pshufb %xmm3, %xmm5
-; SSE41-NEXT: psrlw $4, %xmm2
-; SSE41-NEXT: pand %xmm0, %xmm2
-; SSE41-NEXT: pshufb %xmm2, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm4
; SSE41-NEXT: paddb %xmm5, %xmm4
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
@@ -676,7 +700,8 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -697,8 +722,8 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpsubd %xmm0, %xmm1, %xmm2
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
-; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -740,8 +765,8 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
-; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
@@ -753,16 +778,17 @@ define <4 x i32> @testv4i32u(<4 x i32> %
; X32-SSE-NEXT: pxor %xmm2, %xmm2
; X32-SSE-NEXT: psubd %xmm0, %xmm2
; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: psubd {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pand %xmm0, %xmm3
+; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; X32-SSE-NEXT: paddd %xmm2, %xmm0
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm3
+; X32-SSE-NEXT: pand %xmm2, %xmm3
; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X32-SSE-NEXT: movdqa %xmm4, %xmm5
; X32-SSE-NEXT: pshufb %xmm3, %xmm5
-; X32-SSE-NEXT: psrlw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: pshufb %xmm2, %xmm4
+; X32-SSE-NEXT: psrlw $4, %xmm0
+; X32-SSE-NEXT: pand %xmm2, %xmm0
+; X32-SSE-NEXT: pshufb %xmm0, %xmm4
; X32-SSE-NEXT: paddb %xmm5, %xmm4
; X32-SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
@@ -780,24 +806,25 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: psubw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubw %xmm0, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psrlw $2, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: paddw %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: paddw %xmm1, %xmm2
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psllw $8, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: retq
;
@@ -806,24 +833,25 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubw %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: psubw {{.*}}(%rip), %xmm1
+; SSE3-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE3-NEXT: paddw %xmm1, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $1, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: psubw %xmm1, %xmm0
+; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE3-NEXT: pand %xmm1, %xmm2
+; SSE3-NEXT: psrlw $2, %xmm0
+; SSE3-NEXT: pand %xmm1, %xmm0
+; SSE3-NEXT: paddw %xmm2, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $4, %xmm1
+; SSE3-NEXT: paddw %xmm0, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
; SSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE3-NEXT: psrlw $1, %xmm0
-; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubw %xmm0, %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
-; SSE3-NEXT: movdqa %xmm1, %xmm2
-; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psrlw $2, %xmm1
-; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: paddw %xmm2, %xmm1
-; SSE3-NEXT: movdqa %xmm1, %xmm2
-; SSE3-NEXT: psrlw $4, %xmm2
-; SSE3-NEXT: paddw %xmm1, %xmm2
-; SSE3-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
; SSE3-NEXT: psllw $8, %xmm0
-; SSE3-NEXT: paddb %xmm2, %xmm0
+; SSE3-NEXT: paddb %xmm1, %xmm0
; SSE3-NEXT: psrlw $8, %xmm0
; SSE3-NEXT: retq
;
@@ -832,16 +860,17 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubw %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: psubw {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pand %xmm0, %xmm2
+; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0
+; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm3, %xmm4
; SSSE3-NEXT: pshufb %xmm2, %xmm4
-; SSSE3-NEXT: psrlw $4, %xmm1
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: pshufb %xmm1, %xmm3
+; SSSE3-NEXT: psrlw $4, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufb %xmm0, %xmm3
; SSSE3-NEXT: paddb %xmm4, %xmm3
; SSSE3-NEXT: movdqa %xmm3, %xmm0
; SSSE3-NEXT: psllw $8, %xmm0
@@ -854,16 +883,17 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubw %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: psubw {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: pand %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: pshufb %xmm2, %xmm4
-; SSE41-NEXT: psrlw $4, %xmm1
-; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: pshufb %xmm1, %xmm3
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm3
; SSE41-NEXT: paddb %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: psllw $8, %xmm0
@@ -876,7 +906,8 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -895,7 +926,8 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -914,16 +946,17 @@ define <8 x i16> @testv8i16(<8 x i16> %i
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubw %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psubw {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
+; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; X32-SSE-NEXT: paddw %xmm1, %xmm0
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
+; X32-SSE-NEXT: pand %xmm1, %xmm2
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
; X32-SSE-NEXT: pshufb %xmm2, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: pshufb %xmm1, %xmm3
+; X32-SSE-NEXT: psrlw $4, %xmm0
+; X32-SSE-NEXT: pand %xmm1, %xmm0
+; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: paddb %xmm4, %xmm3
; X32-SSE-NEXT: movdqa %xmm3, %xmm0
; X32-SSE-NEXT: psllw $8, %xmm0
@@ -940,24 +973,25 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubw %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: psubw {{.*}}(%rip), %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: paddw %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: psubw %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: psrlw $2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: paddw %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $4, %xmm1
+; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubw %xmm0, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psrlw $2, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: paddw %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrlw $4, %xmm2
-; SSE2-NEXT: paddw %xmm1, %xmm2
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psllw $8, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm0
+; SSE2-NEXT: paddb %xmm1, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: retq
;
@@ -966,24 +1000,25 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubw %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: psubw {{.*}}(%rip), %xmm1
+; SSE3-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE3-NEXT: paddw %xmm1, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $1, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE3-NEXT: psubw %xmm1, %xmm0
+; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107]
+; SSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE3-NEXT: pand %xmm1, %xmm2
+; SSE3-NEXT: psrlw $2, %xmm0
+; SSE3-NEXT: pand %xmm1, %xmm0
+; SSE3-NEXT: paddw %xmm2, %xmm0
+; SSE3-NEXT: movdqa %xmm0, %xmm1
+; SSE3-NEXT: psrlw $4, %xmm1
+; SSE3-NEXT: paddw %xmm0, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm1
; SSE3-NEXT: movdqa %xmm1, %xmm0
-; SSE3-NEXT: psrlw $1, %xmm0
-; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubw %xmm0, %xmm1
-; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107]
-; SSE3-NEXT: movdqa %xmm1, %xmm2
-; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psrlw $2, %xmm1
-; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: paddw %xmm2, %xmm1
-; SSE3-NEXT: movdqa %xmm1, %xmm2
-; SSE3-NEXT: psrlw $4, %xmm2
-; SSE3-NEXT: paddw %xmm1, %xmm2
-; SSE3-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE3-NEXT: movdqa %xmm2, %xmm0
; SSE3-NEXT: psllw $8, %xmm0
-; SSE3-NEXT: paddb %xmm2, %xmm0
+; SSE3-NEXT: paddb %xmm1, %xmm0
; SSE3-NEXT: psrlw $8, %xmm0
; SSE3-NEXT: retq
;
@@ -992,16 +1027,17 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubw %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: psubw {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm1, %xmm2
-; SSSE3-NEXT: pand %xmm0, %xmm2
+; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0
+; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm3, %xmm4
; SSSE3-NEXT: pshufb %xmm2, %xmm4
-; SSSE3-NEXT: psrlw $4, %xmm1
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: pshufb %xmm1, %xmm3
+; SSSE3-NEXT: psrlw $4, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pshufb %xmm0, %xmm3
; SSSE3-NEXT: paddb %xmm4, %xmm3
; SSSE3-NEXT: movdqa %xmm3, %xmm0
; SSSE3-NEXT: psllw $8, %xmm0
@@ -1014,16 +1050,17 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubw %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: psubw {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: pand %xmm0, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE41-NEXT: paddw %xmm1, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: pand %xmm1, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: pshufb %xmm2, %xmm4
-; SSE41-NEXT: psrlw $4, %xmm1
-; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: pshufb %xmm1, %xmm3
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pshufb %xmm0, %xmm3
; SSE41-NEXT: paddb %xmm4, %xmm3
; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: psllw $8, %xmm0
@@ -1036,7 +1073,8 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -1055,7 +1093,8 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -1074,16 +1113,17 @@ define <8 x i16> @testv8i16u(<8 x i16> %
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubw %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psubw {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
+; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0
+; X32-SSE-NEXT: paddw %xmm1, %xmm0
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
+; X32-SSE-NEXT: pand %xmm1, %xmm2
; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
; X32-SSE-NEXT: pshufb %xmm2, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: pshufb %xmm1, %xmm3
+; X32-SSE-NEXT: psrlw $4, %xmm0
+; X32-SSE-NEXT: pand %xmm1, %xmm0
+; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: paddb %xmm4, %xmm3
; X32-SSE-NEXT: movdqa %xmm3, %xmm0
; X32-SSE-NEXT: psllw $8, %xmm0
@@ -1100,20 +1140,21 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: paddb %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubb %xmm0, %xmm1
+; SSE2-NEXT: psubb %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: paddb %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $2, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: paddb %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: paddb %xmm2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -1122,20 +1163,21 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubb %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE3-NEXT: paddb %xmm1, %xmm2
+; SSE3-NEXT: movdqa %xmm2, %xmm0
; SSE3-NEXT: psrlw $1, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubb %xmm0, %xmm1
+; SSE3-NEXT: psubb %xmm0, %xmm2
; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE3-NEXT: movdqa %xmm1, %xmm2
-; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psrlw $2, %xmm1
+; SSE3-NEXT: movdqa %xmm2, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: paddb %xmm2, %xmm1
-; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: psrlw $2, %xmm2
+; SSE3-NEXT: pand %xmm0, %xmm2
+; SSE3-NEXT: paddb %xmm1, %xmm2
+; SSE3-NEXT: movdqa %xmm2, %xmm0
; SSE3-NEXT: psrlw $4, %xmm0
-; SSE3-NEXT: paddb %xmm1, %xmm0
+; SSE3-NEXT: paddb %xmm2, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -1144,16 +1186,17 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubb %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm1, %xmm3
-; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSSE3-NEXT: paddb %xmm1, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm1, %xmm3
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: pshufb %xmm3, %xmm4
-; SSSE3-NEXT: psrlw $4, %xmm1
-; SSSE3-NEXT: pand %xmm2, %xmm1
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: psrlw $4, %xmm2
+; SSSE3-NEXT: pand %xmm1, %xmm2
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: paddb %xmm4, %xmm0
; SSSE3-NEXT: retq
;
@@ -1162,16 +1205,17 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubb %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: paddb %xmm1, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pshufb %xmm3, %xmm4
-; SSE41-NEXT: psrlw $4, %xmm1
-; SSE41-NEXT: pand %xmm2, %xmm1
-; SSE41-NEXT: pshufb %xmm1, %xmm0
+; SSE41-NEXT: psrlw $4, %xmm2
+; SSE41-NEXT: pand %xmm1, %xmm2
+; SSE41-NEXT: pshufb %xmm2, %xmm0
; SSE41-NEXT: paddb %xmm4, %xmm0
; SSE41-NEXT: retq
;
@@ -1180,7 +1224,8 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -1196,7 +1241,8 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -1212,16 +1258,17 @@ define <16 x i8> @testv16i8(<16 x i8> %i
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubb %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psubb {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm3
-; X32-SSE-NEXT: pand %xmm2, %xmm3
+; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; X32-SSE-NEXT: paddb %xmm1, %xmm2
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
+; X32-SSE-NEXT: pand %xmm1, %xmm3
; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X32-SSE-NEXT: movdqa %xmm0, %xmm4
; X32-SSE-NEXT: pshufb %xmm3, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand %xmm2, %xmm1
-; X32-SSE-NEXT: pshufb %xmm1, %xmm0
+; X32-SSE-NEXT: psrlw $4, %xmm2
+; X32-SSE-NEXT: pand %xmm1, %xmm2
+; X32-SSE-NEXT: pshufb %xmm2, %xmm0
; X32-SSE-NEXT: paddb %xmm4, %xmm0
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 0)
@@ -1234,20 +1281,21 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: psubb %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: paddb %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: psubb %xmm0, %xmm1
+; SSE2-NEXT: psubb %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: psrlw $2, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: paddb %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $2, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: paddb %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: paddb %xmm1, %xmm0
+; SSE2-NEXT: paddb %xmm2, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
@@ -1256,20 +1304,21 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSE3-NEXT: pxor %xmm1, %xmm1
; SSE3-NEXT: psubb %xmm0, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE3-NEXT: paddb %xmm1, %xmm2
+; SSE3-NEXT: movdqa %xmm2, %xmm0
; SSE3-NEXT: psrlw $1, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE3-NEXT: psubb %xmm0, %xmm1
+; SSE3-NEXT: psubb %xmm0, %xmm2
; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
-; SSE3-NEXT: movdqa %xmm1, %xmm2
-; SSE3-NEXT: pand %xmm0, %xmm2
-; SSE3-NEXT: psrlw $2, %xmm1
+; SSE3-NEXT: movdqa %xmm2, %xmm1
; SSE3-NEXT: pand %xmm0, %xmm1
-; SSE3-NEXT: paddb %xmm2, %xmm1
-; SSE3-NEXT: movdqa %xmm1, %xmm0
+; SSE3-NEXT: psrlw $2, %xmm2
+; SSE3-NEXT: pand %xmm0, %xmm2
+; SSE3-NEXT: paddb %xmm1, %xmm2
+; SSE3-NEXT: movdqa %xmm2, %xmm0
; SSE3-NEXT: psrlw $4, %xmm0
-; SSE3-NEXT: paddb %xmm1, %xmm0
+; SSE3-NEXT: paddb %xmm2, %xmm0
; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
@@ -1278,16 +1327,17 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: psubb %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSSE3-NEXT: movdqa %xmm1, %xmm3
-; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSSE3-NEXT: paddb %xmm1, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm1, %xmm3
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: pshufb %xmm3, %xmm4
-; SSSE3-NEXT: psrlw $4, %xmm1
-; SSSE3-NEXT: pand %xmm2, %xmm1
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: psrlw $4, %xmm2
+; SSSE3-NEXT: pand %xmm1, %xmm2
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: paddb %xmm4, %xmm0
; SSSE3-NEXT: retq
;
@@ -1296,16 +1346,17 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: psubb %xmm0, %xmm1
; SSE41-NEXT: pand %xmm0, %xmm1
-; SSE41-NEXT: psubb {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: paddb %xmm1, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: pand %xmm1, %xmm3
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pshufb %xmm3, %xmm4
-; SSE41-NEXT: psrlw $4, %xmm1
-; SSE41-NEXT: pand %xmm2, %xmm1
-; SSE41-NEXT: pshufb %xmm1, %xmm0
+; SSE41-NEXT: psrlw $4, %xmm2
+; SSE41-NEXT: pand %xmm1, %xmm2
+; SSE41-NEXT: pshufb %xmm2, %xmm0
; SSE41-NEXT: paddb %xmm4, %xmm0
; SSE41-NEXT: retq
;
@@ -1314,7 +1365,8 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -1330,7 +1382,8 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %xmm0, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -1346,16 +1399,17 @@ define <16 x i8> @testv16i8u(<16 x i8> %
; X32-SSE-NEXT: pxor %xmm1, %xmm1
; X32-SSE-NEXT: psubb %xmm0, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psubb {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm3
-; X32-SSE-NEXT: pand %xmm2, %xmm3
+; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; X32-SSE-NEXT: paddb %xmm1, %xmm2
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
+; X32-SSE-NEXT: pand %xmm1, %xmm3
; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; X32-SSE-NEXT: movdqa %xmm0, %xmm4
; X32-SSE-NEXT: pshufb %xmm3, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand %xmm2, %xmm1
-; X32-SSE-NEXT: pshufb %xmm1, %xmm0
+; X32-SSE-NEXT: psrlw $4, %xmm2
+; X32-SSE-NEXT: pand %xmm1, %xmm2
+; X32-SSE-NEXT: pshufb %xmm2, %xmm0
; X32-SSE-NEXT: paddb %xmm4, %xmm0
; X32-SSE-NEXT: retl
%out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 -1)
Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll Mon Jun 26 07:19:26 2017
@@ -15,8 +15,8 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
-; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -28,7 +28,7 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX1-NEXT: vpsadbw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm5
; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -44,8 +44,8 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -62,7 +62,8 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpsubq {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512CDVL-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -79,8 +80,8 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CD-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512CD-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX512CD-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512CD-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -97,8 +98,8 @@ define <4 x i64> @testv4i64(<4 x i64> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
@@ -130,8 +131,8 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
-; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -143,7 +144,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX1-NEXT: vpsadbw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm5
; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -159,8 +160,8 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -197,8 +198,8 @@ define <4 x i64> @testv4i64u(<4 x i64> %
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
@@ -230,8 +231,8 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
-; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -247,7 +248,7 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm5
; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -267,8 +268,8 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -289,7 +290,8 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpsubd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512CDVL-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -310,8 +312,8 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CD-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX512CD-NEXT: vpsubd %ymm2, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512CD-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -332,8 +334,8 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
@@ -343,8 +345,8 @@ define <8 x i32> @testv8i32(<8 x i32> %i
; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-AVX-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm2
-; X32-AVX-NEXT: vpsubd %ymm2, %ymm0, %ymm0
+; X32-AVX-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-AVX-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -370,8 +372,8 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
-; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -387,7 +389,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm5
; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -407,8 +409,8 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2
-; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -449,8 +451,8 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
@@ -460,8 +462,8 @@ define <8 x i32> @testv8i32u(<8 x i32> %
; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-AVX-NEXT: vpsubd %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm2
-; X32-AVX-NEXT: vpsubd %ymm2, %ymm0, %ymm0
+; X32-AVX-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-AVX-NEXT: vpaddd %ymm2, %ymm0, %ymm0
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -486,8 +488,8 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -502,7 +504,7 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -520,7 +522,8 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -539,7 +542,8 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -558,7 +562,8 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CD-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -577,7 +582,8 @@ define <16 x i16> @testv16i16(<16 x i16>
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -596,7 +602,8 @@ define <16 x i16> @testv16i16(<16 x i16>
; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-AVX-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubw {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X32-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -619,8 +626,8 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -635,7 +642,7 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -653,7 +660,8 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -672,7 +680,8 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -691,7 +700,8 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CD-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -710,7 +720,8 @@ define <16 x i16> @testv16i16u(<16 x i16
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -729,7 +740,8 @@ define <16 x i16> @testv16i16u(<16 x i16
; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-AVX-NEXT: vpsubw %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubw {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X32-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -753,8 +765,8 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -765,7 +777,7 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX1-NEXT: vpaddb %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpsubb %xmm0, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -780,7 +792,8 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -796,7 +809,8 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -812,7 +826,8 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CD-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CD-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -828,7 +843,8 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -844,7 +860,8 @@ define <32 x i8> @testv32i8(<32 x i8> %i
; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-AVX-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubb {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X32-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -865,8 +882,8 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -877,7 +894,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX1-NEXT: vpaddb %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpsubb %xmm0, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
@@ -892,7 +909,8 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -908,7 +926,8 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CDVL-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -924,7 +943,8 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512CD-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512CD-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -940,7 +960,8 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -956,7 +977,8 @@ define <32 x i8> @testv32i8u(<32 x i8> %
; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-AVX-NEXT: vpsubb %ymm0, %ymm1, %ymm1
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubb {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X32-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
Modified: llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-tzcnt-512.ll Mon Jun 26 07:19:26 2017
@@ -10,7 +10,8 @@ define <8 x i64> @testv8i64(<8 x i64> %i
; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CD-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm3
@@ -37,7 +38,8 @@ define <8 x i64> @testv8i64(<8 x i64> %i
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm3
; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -54,7 +56,8 @@ define <8 x i64> @testv8i64(<8 x i64> %i
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -71,7 +74,8 @@ define <8 x i64> @testv8i64(<8 x i64> %i
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 0)
@@ -104,7 +108,8 @@ define <8 x i64> @testv8i64u(<8 x i64> %
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -121,7 +126,8 @@ define <8 x i64> @testv8i64u(<8 x i64> %
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 -1)
@@ -134,7 +140,8 @@ define <16 x i32> @testv16i32(<16 x i32>
; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512CD-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512CD-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm3
@@ -169,7 +176,8 @@ define <16 x i32> @testv16i32(<16 x i32>
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512CDBW-NEXT: vpandd %zmm2, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm3
; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -190,7 +198,8 @@ define <16 x i32> @testv16i32(<16 x i32>
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -211,7 +220,8 @@ define <16 x i32> @testv16i32(<16 x i32>
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %in, i1 0)
@@ -244,7 +254,8 @@ define <16 x i32> @testv16i32u(<16 x i32
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -265,7 +276,8 @@ define <16 x i32> @testv16i32u(<16 x i32
; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
%out = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %in, i1 -1)
@@ -278,8 +290,8 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -293,7 +305,7 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512CD-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512CD-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -310,7 +322,8 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -329,7 +342,8 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -348,8 +362,8 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -363,7 +377,7 @@ define <32 x i16> @testv32i16(<32 x i16>
; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -384,8 +398,8 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -399,7 +413,7 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512CD-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512CD-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -416,7 +430,8 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -435,7 +450,8 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -454,8 +470,8 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -469,7 +485,7 @@ define <32 x i16> @testv32i16u(<32 x i16
; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -490,8 +506,8 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -502,7 +518,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512CD-NEXT: vpaddb %ymm5, %ymm0, %ymm0
; AVX512CD-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -516,7 +532,8 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -532,7 +549,8 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -548,8 +566,8 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -560,7 +578,7 @@ define <64 x i8> @testv64i8(<64 x i8> %i
; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -578,8 +596,8 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -590,7 +608,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512CD-NEXT: vpaddb %ymm5, %ymm0, %ymm0
; AVX512CD-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -604,7 +622,8 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -620,7 +639,8 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -636,8 +656,8 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -648,7 +668,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %
; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
Modified: llvm/trunk/test/CodeGen/X86/widen_arith-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-1.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-1.ll Mon Jun 26 07:19:26 2017
@@ -6,7 +6,7 @@ define void @update(<3 x i8>* %dst, <3 x
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: movl $0, (%esp)
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <1,1,1,u>
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; CHECK-NEXT: jmp .LBB0_1
; CHECK-NEXT: .p2align 4, 0x90
@@ -16,7 +16,7 @@ define void @update(<3 x i8>* %dst, <3 x
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: pmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; CHECK-NEXT: paddd %xmm0, %xmm2
+; CHECK-NEXT: psubd %xmm0, %xmm2
; CHECK-NEXT: pextrb $8, %xmm2, 2(%ecx,%eax,4)
; CHECK-NEXT: pshufb %xmm1, %xmm2
; CHECK-NEXT: pextrw $0, %xmm2, (%ecx,%eax,4)
Modified: llvm/trunk/test/CodeGen/X86/widen_arith-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-2.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-2.ll Mon Jun 26 07:19:26 2017
@@ -8,7 +8,7 @@ define void @update(i64* %dst_i, i64* %s
; CHECK: # BB#0: # %entry
; CHECK-NEXT: subl $12, %esp
; CHECK-NEXT: movl $0, (%esp)
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; CHECK-NEXT: jmp .LBB0_1
@@ -26,7 +26,7 @@ define void @update(i64* %dst_i, i64* %s
; CHECK-NEXT: movl (%esp), %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT: pmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; CHECK-NEXT: paddw %xmm0, %xmm3
+; CHECK-NEXT: psubw %xmm0, %xmm3
; CHECK-NEXT: pand %xmm1, %xmm3
; CHECK-NEXT: pshufb %xmm2, %xmm3
; CHECK-NEXT: movq %xmm3, (%edx,%ecx,8)
Modified: llvm/trunk/test/CodeGen/X86/widen_arith-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_arith-3.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_arith-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_arith-3.ll Mon Jun 26 07:19:26 2017
@@ -14,8 +14,8 @@ define void @update(<3 x i16>* %dst, <3
; CHECK-NEXT: andl $-8, %esp
; CHECK-NEXT: subl $40, %esp
; CHECK-NEXT: movl {{\.LCPI.*}}, %eax
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <1,1,1,u>
; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: movw $1, {{[0-9]+}}(%esp)
@@ -29,7 +29,7 @@ define void @update(<3 x i16>* %dst, <3
; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; CHECK-NEXT: pinsrd $2, 4(%edx,%eax,8), %xmm2
-; CHECK-NEXT: paddd %xmm0, %xmm2
+; CHECK-NEXT: psubd %xmm0, %xmm2
; CHECK-NEXT: pextrw $4, %xmm2, 4(%ecx,%eax,8)
; CHECK-NEXT: pshufb %xmm1, %xmm2
; CHECK-NEXT: movd %xmm2, (%ecx,%eax,8)
Modified: llvm/trunk/test/CodeGen/X86/widen_cast-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-2.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-2.ll Mon Jun 26 07:19:26 2017
@@ -7,8 +7,7 @@ define void @convert(<7 x i32>* %dst, <1
; CHECK: # BB#0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movl $0, (%esp)
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <1,1,1,1,1,1,u,u>
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
; CHECK-NEXT: cmpl $3, (%esp)
; CHECK-NEXT: jg .LBB0_3
; CHECK-NEXT: .p2align 4, 0x90
@@ -18,14 +17,14 @@ define void @convert(<7 x i32>* %dst, <1
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: shll $5, %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: movdqa (%edx,%eax), %xmm2
-; CHECK-NEXT: paddw %xmm0, %xmm2
-; CHECK-NEXT: movdqa 16(%edx,%eax), %xmm3
-; CHECK-NEXT: paddw %xmm1, %xmm3
-; CHECK-NEXT: pextrd $2, %xmm3, 24(%ecx,%eax)
-; CHECK-NEXT: pextrd $1, %xmm3, 20(%ecx,%eax)
-; CHECK-NEXT: movd %xmm3, 16(%ecx,%eax)
-; CHECK-NEXT: movdqa %xmm2, (%ecx,%eax)
+; CHECK-NEXT: movdqa (%edx,%eax), %xmm1
+; CHECK-NEXT: movdqa 16(%edx,%eax), %xmm2
+; CHECK-NEXT: psubw %xmm0, %xmm1
+; CHECK-NEXT: psubw %xmm0, %xmm2
+; CHECK-NEXT: pextrd $2, %xmm2, 24(%ecx,%eax)
+; CHECK-NEXT: pextrd $1, %xmm2, 20(%ecx,%eax)
+; CHECK-NEXT: movd %xmm2, 16(%ecx,%eax)
+; CHECK-NEXT: movdqa %xmm1, (%ecx,%eax)
; CHECK-NEXT: incl (%esp)
; CHECK-NEXT: cmpl $3, (%esp)
; CHECK-NEXT: jle .LBB0_2
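
This widen_cast-2.ll hunk also saves a register: the old code loaded two separate constants (a full [1,1,1,1,1,1,1,1] splat and a partial <1,1,1,1,1,1,u,u> one) into %xmm0 and %xmm1, while the new code materializes a single all-ones register that both psubw instructions reuse. For the 16-bit lanes used here the rewrite is exact under wraparound; a scalar sketch (illustrative only, not from this commit) can check every lane value exhaustively:

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    /* psubw/paddw operate mod 2^16 per lane, so it suffices to check
       that x - 0xFFFF and x + 1 agree for every 16-bit value. */
    for (uint32_t i = 0; i <= 0xFFFFu; ++i) {
      uint16_t x = (uint16_t)i;
      assert((uint16_t)(x - 0xFFFFu) == (uint16_t)(x + 1));
    }
    return 0;
  }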
Modified: llvm/trunk/test/CodeGen/X86/widen_cast-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-3.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-3.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-3.ll Mon Jun 26 07:19:26 2017
@@ -8,7 +8,8 @@ define void @convert(<12 x i8>* %dst.add
; X86-LABEL: convert:
; X86: # BB#0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: psubd %xmm1, %xmm0
; X86-NEXT: pextrd $2, %xmm0, 8(%eax)
; X86-NEXT: pextrd $1, %xmm0, 4(%eax)
; X86-NEXT: movd %xmm0, (%eax)
@@ -16,7 +17,8 @@ define void @convert(<12 x i8>* %dst.add
;
; X64-LABEL: convert:
; X64: # BB#0:
-; X64-NEXT: paddd {{.*}}(%rip), %xmm0
+; X64-NEXT: pcmpeqd %xmm1, %xmm1
+; X64-NEXT: psubd %xmm1, %xmm0
; X64-NEXT: pextrd $2, %xmm0, 8(%rdi)
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/widen_cast-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_cast-4.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_cast-4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_cast-4.ll Mon Jun 26 07:19:26 2017
@@ -9,7 +9,7 @@ define void @update(i64* %dst_i, i64* %s
; NARROW: # BB#0: # %entry
; NARROW-NEXT: subl $12, %esp
; NARROW-NEXT: movl $0, (%esp)
-; NARROW-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; NARROW-NEXT: pcmpeqd %xmm0, %xmm0
; NARROW-NEXT: movdqa {{.*#+}} xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; NARROW-NEXT: jmp .LBB0_1
; NARROW-NEXT: .p2align 4, 0x90
@@ -26,7 +26,7 @@ define void @update(i64* %dst_i, i64* %s
; NARROW-NEXT: movl (%esp), %ecx
; NARROW-NEXT: movl {{[0-9]+}}(%esp), %edx
; NARROW-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; NARROW-NEXT: paddw %xmm0, %xmm2
+; NARROW-NEXT: psubw %xmm0, %xmm2
; NARROW-NEXT: psllw $8, %xmm2
; NARROW-NEXT: psraw $8, %xmm2
; NARROW-NEXT: psraw $2, %xmm2
@@ -46,7 +46,7 @@ define void @update(i64* %dst_i, i64* %s
; WIDE: # BB#0: # %entry
; WIDE-NEXT: subl $12, %esp
; WIDE-NEXT: movl $0, (%esp)
-; WIDE-NEXT: movdqa {{.*#+}} xmm0 = <1,1,1,1,1,1,1,1,u,u,u,u,u,u,u,u>
+; WIDE-NEXT: pcmpeqd %xmm0, %xmm0
; WIDE-NEXT: movdqa {{.*#+}} xmm1 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; WIDE-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
; WIDE-NEXT: jmp .LBB0_1
@@ -65,7 +65,7 @@ define void @update(i64* %dst_i, i64* %s
; WIDE-NEXT: movl {{[0-9]+}}(%esp), %edx
; WIDE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; WIDE-NEXT: pinsrd $1, 4(%eax,%ecx,8), %xmm3
-; WIDE-NEXT: paddb %xmm0, %xmm3
+; WIDE-NEXT: psubb %xmm0, %xmm3
; WIDE-NEXT: psrlw $2, %xmm3
; WIDE-NEXT: pand %xmm1, %xmm3
; WIDE-NEXT: pxor %xmm2, %xmm3
Modified: llvm/trunk/test/CodeGen/X86/widen_conv-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/widen_conv-1.ll?rev=306289&r1=306288&r2=306289&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/widen_conv-1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/widen_conv-1.ll Mon Jun 26 07:19:26 2017
@@ -35,7 +35,8 @@ define void @convert_v3i32_to_v3i8(<3 x
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movdqa (%ecx), %xmm0
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: psubd %xmm1, %xmm0
; X86-NEXT: pextrb $8, %xmm0, 2(%eax)
; X86-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X86-NEXT: pextrw $0, %xmm0, (%eax)
@@ -45,7 +46,8 @@ define void @convert_v3i32_to_v3i8(<3 x
; X64-LABEL: convert_v3i32_to_v3i8:
; X64: # BB#0: # %entry
; X64-NEXT: movdqa (%rsi), %xmm0
-; X64-NEXT: paddd {{.*}}(%rip), %xmm0
+; X64-NEXT: pcmpeqd %xmm1, %xmm1
+; X64-NEXT: psubd %xmm1, %xmm0
; X64-NEXT: pextrb $8, %xmm0, 2(%rdi)
; X64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; X64-NEXT: pextrw $0, %xmm0, (%rdi)
@@ -70,7 +72,8 @@ define void @convert_v5i16_to_v5i8(<5 x
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl 12(%ebp), %ecx
; X86-NEXT: movdqa (%ecx), %xmm0
-; X86-NEXT: paddw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: psubw %xmm1, %xmm0
; X86-NEXT: pextrb $8, %xmm0, 4(%eax)
; X86-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; X86-NEXT: movd %xmm0, (%eax)
@@ -81,7 +84,8 @@ define void @convert_v5i16_to_v5i8(<5 x
; X64-LABEL: convert_v5i16_to_v5i8:
; X64: # BB#0: # %entry
; X64-NEXT: movdqa (%rsi), %xmm0
-; X64-NEXT: paddw {{.*}}(%rip), %xmm0
+; X64-NEXT: pcmpeqd %xmm1, %xmm1
+; X64-NEXT: psubw %xmm1, %xmm0
; X64-NEXT: pextrb $8, %xmm0, 4(%rdi)
; X64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; X64-NEXT: movd %xmm0, (%rdi)