[llvm] r338402 - [X86] Add pattern matching for PMADDUBSW

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 31 10:12:08 PDT 2018


Author: ctopper
Date: Tue Jul 31 10:12:08 2018
New Revision: 338402

URL: http://llvm.org/viewvc/llvm-project?rev=338402&view=rev
Log:
[X86] Add pattern matching for PMADDUBSW

Summary:
Similar to D49636, but for PMADDUBSW. This instruction has the additional complexity that the addition of the two products saturates to 16 bits rather than wrapping around, and that one operand is treated as signed while the other is treated as unsigned.
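
For context, each pmaddubsw output lane is a two-element dot product: unsigned bytes from one source are multiplied by the corresponding signed bytes from the other, and the two 16-bit products are summed with signed saturation. The saturation is not theoretical: the sum can reach 255*127 + 255*127 = 64770 at the high end and 2*(255*-128) = -65280 at the low end, both outside [-32768, 32767]. A minimal scalar model of one lane (illustration only; the name and signature are hypothetical, not from the patch):

```
#include <stdint.h>

/* Scalar model of one pmaddubsw output lane. u holds the bytes treated as
   unsigned, s the bytes treated as signed. Each u8*s8 product fits in i16,
   but their sum can overflow it, so the sum saturates instead of wrapping. */
static int16_t pmaddubsw_lane(const uint8_t *u, const int8_t *s, int i) {
  int32_t sum = (int32_t)u[2*i] * s[2*i] + (int32_t)u[2*i+1] * s[2*i+1];
  if (sum > INT16_MAX) sum = INT16_MAX;
  if (sum < INT16_MIN) sum = INT16_MIN;
  return (int16_t)sum;
}
```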

A C example that triggers this pattern:

```
#include <stdint.h>

#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))

#define N 128  /* a const int is not a constant expression in C */

int8_t A[2*N];
uint8_t B[2*N];
int16_t C[N];

void foo() {
  for (int i = 0; i != N; ++i)
    C[i] = MIN(MAX((int16_t)A[2*i]*(int16_t)B[2*i] + (int16_t)A[2*i+1]*(int16_t)B[2*i+1], -32768), 32767);
}
```
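
Here MIN and MAX are the usual clamping macros (defined above so the example is self-contained). After vectorization, the clamp becomes an smin/smax of the widened products against 32767 and -32768 followed by a truncate to vXi16, which is exactly the shape the new combine looks for: detectSSatPattern recognizes the signed-saturation clamp, and detectPMADDUBSW then checks that the saturated value is an add of two multiplies whose operands are zero- and sign-extended even/odd i8 elements drawn from the same two inputs.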

Reviewers: RKSimon, spatel, zvi

Reviewed By: RKSimon, zvi

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D49829

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/pmaddubsw.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=338402&r1=338401&r2=338402&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jul 31 10:12:08 2018
@@ -36753,6 +36753,145 @@ static SDValue combinePMULH(SDValue Src,
   return DAG.getNode(Opc, DL, VT, LHS, RHS);
 }
 
+// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
+// from one vector with signed bytes from another vector, adds together
+// adjacent pairs of 16-bit products, and saturates the result before
+// truncating to 16 bits.
+//
+// Which looks something like this:
+// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
+//                 (mul (zext (odd elts (i8 A))), (sext (odd elts (i8 B)))))))
+static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
+                               const X86Subtarget &Subtarget,
+                               const SDLoc &DL) {
+  if (!VT.isVector() || !Subtarget.hasSSSE3())
+    return SDValue();
+
+  unsigned NumElems = VT.getVectorNumElements();
+  EVT ScalarVT = VT.getVectorElementType();
+  if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
+    return SDValue();
+
+  SDValue SSatVal = detectSSatPattern(In, VT);
+  if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
+    return SDValue();
+
+  // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
+  // of multiplies from even/odd elements.
+  SDValue N0 = SSatVal.getOperand(0);
+  SDValue N1 = SSatVal.getOperand(1);
+
+  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
+    return SDValue();
+
+  SDValue N00 = N0.getOperand(0);
+  SDValue N01 = N0.getOperand(1);
+  SDValue N10 = N1.getOperand(0);
+  SDValue N11 = N1.getOperand(1);
+
+  // TODO: Handle constant vectors and use knownbits/computenumsignbits?
+  // Canonicalize zero_extend to LHS.
+  if (N01.getOpcode() == ISD::ZERO_EXTEND)
+    std::swap(N00, N01);
+  if (N11.getOpcode() == ISD::ZERO_EXTEND)
+    std::swap(N10, N11);
+
+  // Ensure we have a zero_extend and a sign_extend.
+  if (N00.getOpcode() != ISD::ZERO_EXTEND ||
+      N01.getOpcode() != ISD::SIGN_EXTEND ||
+      N10.getOpcode() != ISD::ZERO_EXTEND ||
+      N11.getOpcode() != ISD::SIGN_EXTEND)
+    return SDValue();
+
+  // Peek through the extends.
+  N00 = N00.getOperand(0);
+  N01 = N01.getOperand(0);
+  N10 = N10.getOperand(0);
+  N11 = N11.getOperand(0);
+
+  // Ensure the extends are from vXi8.
+  if (N00.getValueType().getVectorElementType() != MVT::i8 ||
+      N01.getValueType().getVectorElementType() != MVT::i8 ||
+      N10.getValueType().getVectorElementType() != MVT::i8 ||
+      N11.getValueType().getVectorElementType() != MVT::i8)
+    return SDValue();
+
+  // All inputs should be build_vectors.
+  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
+      N01.getOpcode() != ISD::BUILD_VECTOR ||
+      N10.getOpcode() != ISD::BUILD_VECTOR ||
+      N11.getOpcode() != ISD::BUILD_VECTOR)
+    return SDValue();
+
+  // N00/N10 are zero extended. N01/N11 are sign extended.
+
+  // For each result element, the even element from one input vector must be
+  // multiplied by the even element of the other vector, and the adjacent odd
+  // elements must likewise be multiplied together. That is, for each element
+  // i, this operation is performed:
+  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
+  SDValue ZExtIn, SExtIn;
+  for (unsigned i = 0; i != NumElems; ++i) {
+    SDValue N00Elt = N00.getOperand(i);
+    SDValue N01Elt = N01.getOperand(i);
+    SDValue N10Elt = N10.getOperand(i);
+    SDValue N11Elt = N11.getOperand(i);
+    // TODO: Be more tolerant to undefs.
+    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+      return SDValue();
+    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
+    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
+    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
+    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
+    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
+      return SDValue();
+    unsigned IdxN00 = ConstN00Elt->getZExtValue();
+    unsigned IdxN01 = ConstN01Elt->getZExtValue();
+    unsigned IdxN10 = ConstN10Elt->getZExtValue();
+    unsigned IdxN11 = ConstN11Elt->getZExtValue();
+    // Add is commutative so indices can be reordered.
+    if (IdxN00 > IdxN10) {
+      std::swap(IdxN00, IdxN10);
+      std::swap(IdxN01, IdxN11);
+    }
+    // N0 indices must be the even element. N1 indices must be the next odd element.
+    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
+        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
+      return SDValue();
+    SDValue N00In = N00Elt.getOperand(0);
+    SDValue N01In = N01Elt.getOperand(0);
+    SDValue N10In = N10Elt.getOperand(0);
+    SDValue N11In = N11Elt.getOperand(0);
+    // First time we find an input capture it.
+    if (!ZExtIn) {
+      ZExtIn = N00In;
+      SExtIn = N01In;
+    }
+    if (ZExtIn != N00In || SExtIn != N01In ||
+        ZExtIn != N10In || SExtIn != N11In)
+      return SDValue();
+  }
+
+  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+                         ArrayRef<SDValue> Ops) {
+    // Create a VPMADDUBSW node from the vXi8 inputs, producing a vXi16
+    // result with half as many elements.
+    EVT InVT = Ops[0].getValueType();
+    assert(InVT.getScalarType() == MVT::i8 &&
+           "Unexpected scalar element type");
+    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
+    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
+                                 InVT.getVectorNumElements() / 2);
+    return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
+  };
+  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
+                          PMADDBuilder);
+}
+
 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
   EVT VT = N->getValueType(0);
@@ -36767,6 +36906,10 @@ static SDValue combineTruncate(SDNode *N
   if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
     return Avg;
 
+  // Try to detect PMADDUBSW.
+  if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
+    return PMAdd;
+
   // Try to combine truncation with signed/unsigned saturation.
   if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
     return Val;
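
One subtle step in the matcher above: since integer add is commutative, the even-index multiply can appear as either operand of the ADD, so the loop reorders each pair of extract indices before insisting on the (2 * i, 2 * i + 1) pairing. A standalone sketch of that per-element check (hypothetical helper written for illustration, not code from the patch):

```
#include <stdbool.h>

/* True if the two extract indices feeding output element i form the
   required even/odd pair. idx0 comes from one multiply and idx1 from the
   other; because the add is commutative they may arrive in either order. */
static bool is_even_odd_pair(unsigned i, unsigned idx0, unsigned idx1) {
  if (idx0 > idx1) { /* canonicalize: smaller index first */
    unsigned tmp = idx0;
    idx0 = idx1;
    idx1 = tmp;
  }
  return idx0 == 2 * i && idx1 == 2 * i + 1;
}
```

Once the pattern is matched, SplitOpsAndApply breaks the vXi8 inputs down to whatever width the target supports, which is why the AVX1 output in the updated tests below uses two 128-bit vpmaddubsw instructions for the 256-bit case.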

Modified: llvm/trunk/test/CodeGen/X86/pmaddubsw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmaddubsw.ll?rev=338402&r1=338401&r2=338402&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmaddubsw.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmaddubsw.ll Tue Jul 31 10:12:08 2018
@@ -10,112 +10,15 @@
 define <8 x i16> @pmaddubsw_128(<16 x i8>* %Aptr, <16 x i8>* %Bptr) {
 ; SSE-LABEL: pmaddubsw_128:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm1
 ; SSE-NEXT:    movdqa (%rsi), %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    movdqa %xmm1, %xmm3
-; SSE-NEXT:    psllw $8, %xmm3
-; SSE-NEXT:    psraw $8, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm4
-; SSE-NEXT:    pmulhw %xmm2, %xmm4
-; SSE-NEXT:    pmullw %xmm2, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE-NEXT:    psrlw $8, %xmm0
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    pmulhw %xmm0, %xmm4
-; SSE-NEXT:    pmullw %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE-NEXT:    paddd %xmm2, %xmm0
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE-NEXT:    paddd %xmm3, %xmm1
-; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    pmaddubsw (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: pmaddubsw_128:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <8,10,12,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm3
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <9,11,13,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = <1,3,5,7,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: pmaddubsw_128:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX2-NEXT:    vpmulld %ymm2, %ymm3, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX2-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: pmaddubsw_128:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX512-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX512-NEXT:    vpmulld %ymm2, %ymm3, %ymm2
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX512-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX512-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
-; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX-LABEL: pmaddubsw_128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX-NEXT:    vpmaddubsw (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %A = load <16 x i8>, <16 x i8>* %Aptr
   %B = load <16 x i8>, <16 x i8>* %Bptr
   %A_even = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -140,209 +43,28 @@ define <8 x i16> @pmaddubsw_128(<16 x i8
 define <16 x i16> @pmaddubsw_256(<32 x i8>* %Aptr, <32 x i8>* %Bptr) {
 ; SSE-LABEL: pmaddubsw_256:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm3
-; SSE-NEXT:    movdqa 16(%rdi), %xmm10
-; SSE-NEXT:    movdqa (%rsi), %xmm1
-; SSE-NEXT:    movdqa 16(%rsi), %xmm7
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE-NEXT:    movdqa %xmm7, %xmm4
-; SSE-NEXT:    pshufb %xmm0, %xmm4
-; SSE-NEXT:    movdqa %xmm1, %xmm6
-; SSE-NEXT:    pshufb %xmm0, %xmm6
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm4[0]
-; SSE-NEXT:    pxor %xmm8, %xmm8
-; SSE-NEXT:    movdqa %xmm6, %xmm4
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; SSE-NEXT:    movdqa %xmm10, %xmm5
-; SSE-NEXT:    pshufb %xmm0, %xmm5
-; SSE-NEXT:    movdqa %xmm3, %xmm2
-; SSE-NEXT:    pshufb %xmm0, %xmm2
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; SSE-NEXT:    psraw $8, %xmm5
-; SSE-NEXT:    movdqa %xmm5, %xmm0
-; SSE-NEXT:    pmulhw %xmm4, %xmm0
-; SSE-NEXT:    pmullw %xmm4, %xmm5
-; SSE-NEXT:    movdqa %xmm5, %xmm9
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; SSE-NEXT:    psraw $8, %xmm4
-; SSE-NEXT:    movdqa %xmm4, %xmm0
-; SSE-NEXT:    pmulhw %xmm6, %xmm0
-; SSE-NEXT:    pmullw %xmm6, %xmm4
-; SSE-NEXT:    movdqa %xmm4, %xmm11
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; SSE-NEXT:    pshufb %xmm0, %xmm7
-; SSE-NEXT:    pshufb %xmm0, %xmm1
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm7[0]
-; SSE-NEXT:    movdqa %xmm1, %xmm2
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
-; SSE-NEXT:    pshufb %xmm0, %xmm10
-; SSE-NEXT:    pshufb %xmm0, %xmm3
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm10[0]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
-; SSE-NEXT:    psraw $8, %xmm7
-; SSE-NEXT:    movdqa %xmm7, %xmm6
-; SSE-NEXT:    pmulhw %xmm2, %xmm6
-; SSE-NEXT:    pmullw %xmm2, %xmm7
-; SSE-NEXT:    movdqa %xmm7, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE-NEXT:    paddd %xmm9, %xmm0
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE-NEXT:    paddd %xmm5, %xmm7
-; SSE-NEXT:    packssdw %xmm7, %xmm0
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; SSE-NEXT:    psraw $8, %xmm2
-; SSE-NEXT:    movdqa %xmm2, %xmm3
-; SSE-NEXT:    pmulhw %xmm1, %xmm3
-; SSE-NEXT:    pmullw %xmm1, %xmm2
-; SSE-NEXT:    movdqa %xmm2, %xmm1
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE-NEXT:    paddd %xmm11, %xmm1
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE-NEXT:    paddd %xmm4, %xmm2
-; SSE-NEXT:    packssdw %xmm2, %xmm1
+; SSE-NEXT:    movdqa (%rsi), %xmm0
+; SSE-NEXT:    movdqa 16(%rsi), %xmm1
+; SSE-NEXT:    pmaddubsw (%rdi), %xmm0
+; SSE-NEXT:    pmaddubsw 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: pmaddubsw_256:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm5
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm5[0],xmm4[0]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX1-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm0[0],xmm3[0]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm6
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; AVX1-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX1-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[1,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm4[3,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[1,1,2,3]
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[3,1,2,3]
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm9
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm4, %xmm8
-; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[1,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm7, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[3,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm6, %xmm4, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,2,3]
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[3,1,2,3]
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd %xmm2, %xmm9, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpaddd %xmm0, %xmm8, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpmaddubsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpmaddubsw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
-; AVX2-LABEL: pmaddubsw_256:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX2-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX2-NEXT:    vpmovsxbd %xmm2, %ymm2
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX2-NEXT:    vpand %ymm6, %ymm5, %ymm5
-; AVX2-NEXT:    vpmulld %ymm5, %ymm3, %ymm3
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm5
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm7 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX2-NEXT:    vpand %ymm6, %ymm7, %ymm7
-; AVX2-NEXT:    vpmulld %ymm7, %ymm2, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm7 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX2-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
-; AVX2-NEXT:    vpmovsxbd %xmm4, %ymm4
-; AVX2-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT:    vpand %ymm6, %ymm1, %ymm1
-; AVX2-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddd %ymm0, %ymm3, %ymm0
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX2-NEXT:    vpsrld $16, %xmm1, %xmm1
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT:    vpsrld $16, %xmm3, %xmm3
-; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
-; AVX2-NEXT:    vpand %ymm6, %ymm1, %ymm1
-; AVX2-NEXT:    vpmulld %ymm1, %ymm4, %ymm1
-; AVX2-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
-; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: pmaddubsw_256:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
-; AVX512-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512-NEXT:    vpshufb %xmm2, %xmm3, %xmm4
-; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm5
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm5[0],xmm4[0]
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm5 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512-NEXT:    vpshufb %xmm2, %xmm3, %xmm6
-; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; AVX512-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX512-NEXT:    vpmovsxbd %xmm4, %zmm3
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512-NEXT:    vpmulld %zmm2, %zmm3, %zmm2
-; AVX512-NEXT:    vpmovsxbd %xmm0, %zmm0
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512-NEXT:    vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
-; AVX512-NEXT:    vpmovsdw %zmm0, %ymm0
-; AVX512-NEXT:    retq
+; AVX256-LABEL: pmaddubsw_256:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX256-NEXT:    vpmaddubsw (%rdi), %ymm0, %ymm0
+; AVX256-NEXT:    retq
   %A = load <32 x i8>, <32 x i8>* %Aptr
   %B = load <32 x i8>, <32 x i8>* %Bptr
   %A_even = shufflevector <32 x i8> %A, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
@@ -367,1205 +89,95 @@ define <16 x i16> @pmaddubsw_256(<32 x i
 define <64 x i16> @pmaddubsw_512(<128 x i8>* %Aptr, <128 x i8>* %Bptr) {
 ; SSE-LABEL: pmaddubsw_512:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    subq $232, %rsp
-; SSE-NEXT:    .cfi_def_cfa_offset 240
-; SSE-NEXT:    movdqa (%rsi), %xmm4
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 16(%rsi), %xmm3
-; SSE-NEXT:    movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 32(%rsi), %xmm9
-; SSE-NEXT:    movdqa %xmm9, (%rsp) # 16-byte Spill
-; SSE-NEXT:    movdqa 48(%rsi), %xmm10
-; SSE-NEXT:    movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa (%rdx), %xmm13
-; SSE-NEXT:    movdqa 16(%rdx), %xmm0
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa 32(%rdx), %xmm5
-; SSE-NEXT:    movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT:    movdqa 112(%rdx), %xmm0
+; SSE-NEXT:    movdqa 96(%rdx), %xmm1
+; SSE-NEXT:    movdqa 80(%rdx), %xmm2
+; SSE-NEXT:    movdqa 64(%rdx), %xmm3
+; SSE-NEXT:    movdqa (%rdx), %xmm4
+; SSE-NEXT:    movdqa 16(%rdx), %xmm5
+; SSE-NEXT:    movdqa 32(%rdx), %xmm6
 ; SSE-NEXT:    movdqa 48(%rdx), %xmm7
-; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    movdqa {{.*#+}} xmm8 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    movdqa %xmm13, %xmm2
-; SSE-NEXT:    pshufb %xmm8, %xmm2
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
-; SSE-NEXT:    pxor %xmm0, %xmm0
-; SSE-NEXT:    movdqa %xmm2, %xmm1
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE-NEXT:    pxor %xmm11, %xmm11
-; SSE-NEXT:    movdqa %xmm3, %xmm0
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    movdqa %xmm4, %xmm3
-; SSE-NEXT:    pshufb %xmm8, %xmm3
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; SSE-NEXT:    psraw $8, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm4
-; SSE-NEXT:    pmulhw %xmm1, %xmm4
-; SSE-NEXT:    pmullw %xmm1, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm11[8],xmm2[9],xmm11[9],xmm2[10],xmm11[10],xmm2[11],xmm11[11],xmm2[12],xmm11[12],xmm2[13],xmm11[13],xmm2[14],xmm11[14],xmm2[15],xmm11[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm3
-; SSE-NEXT:    pmulhw %xmm2, %xmm3
-; SSE-NEXT:    pmullw %xmm2, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE-NEXT:    movdqa %xmm7, %xmm2
-; SSE-NEXT:    pshufb %xmm8, %xmm2
-; SSE-NEXT:    movdqa %xmm5, %xmm4
-; SSE-NEXT:    pshufb %xmm8, %xmm4
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm2[0]
-; SSE-NEXT:    movdqa %xmm10, %xmm2
-; SSE-NEXT:    pshufb %xmm8, %xmm2
-; SSE-NEXT:    movdqa %xmm9, %xmm3
-; SSE-NEXT:    pshufb %xmm8, %xmm3
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
-; SSE-NEXT:    movdqa %xmm4, %xmm5
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3],xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1],xmm14[2],xmm3[2],xmm14[3],xmm3[3],xmm14[4],xmm3[4],xmm14[5],xmm3[5],xmm14[6],xmm3[6],xmm14[7],xmm3[7]
-; SSE-NEXT:    psraw $8, %xmm14
-; SSE-NEXT:    movdqa %xmm14, %xmm7
-; SSE-NEXT:    pmulhw %xmm5, %xmm7
-; SSE-NEXT:    pmullw %xmm5, %xmm14
-; SSE-NEXT:    movdqa %xmm14, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm7[4],xmm14[5],xmm7[5],xmm14[6],xmm7[6],xmm14[7],xmm7[7]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm11[8],xmm4[9],xmm11[9],xmm4[10],xmm11[10],xmm4[11],xmm11[11],xmm4[12],xmm11[12],xmm4[13],xmm11[13],xmm4[14],xmm11[14],xmm4[15],xmm11[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    psraw $8, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm5
-; SSE-NEXT:    pmulhw %xmm4, %xmm5
-; SSE-NEXT:    pmullw %xmm4, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE-NEXT:    movdqa 80(%rdx), %xmm4
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufb %xmm8, %xmm4
-; SSE-NEXT:    movdqa 64(%rdx), %xmm15
-; SSE-NEXT:    movdqa %xmm15, %xmm9
-; SSE-NEXT:    pshufb %xmm8, %xmm9
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm4[0]
-; SSE-NEXT:    movdqa 80(%rsi), %xmm4
-; SSE-NEXT:    movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufb %xmm8, %xmm4
-; SSE-NEXT:    movdqa 64(%rsi), %xmm7
-; SSE-NEXT:    movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufb %xmm8, %xmm7
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm4[0]
-; SSE-NEXT:    movdqa %xmm9, %xmm10
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3],xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; SSE-NEXT:    psraw $8, %xmm4
-; SSE-NEXT:    movdqa %xmm4, %xmm11
-; SSE-NEXT:    pmulhw %xmm10, %xmm11
-; SSE-NEXT:    pmullw %xmm10, %xmm4
-; SSE-NEXT:    movdqa %xmm4, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
-; SSE-NEXT:    pxor %xmm0, %xmm0
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    psraw $8, %xmm7
-; SSE-NEXT:    movdqa %xmm7, %xmm10
-; SSE-NEXT:    pmulhw %xmm9, %xmm10
-; SSE-NEXT:    pmullw %xmm9, %xmm7
-; SSE-NEXT:    movdqa %xmm7, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3]
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
-; SSE-NEXT:    movdqa 112(%rdx), %xmm9
-; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufb %xmm8, %xmm9
-; SSE-NEXT:    movdqa 96(%rdx), %xmm10
-; SSE-NEXT:    movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufb %xmm8, %xmm10
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm9[0]
-; SSE-NEXT:    movdqa 112(%rsi), %xmm9
-; SSE-NEXT:    movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufb %xmm8, %xmm9
-; SSE-NEXT:    movdqa 96(%rsi), %xmm12
-; SSE-NEXT:    movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    pshufb %xmm8, %xmm12
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm12 = xmm12[0],xmm9[0]
-; SSE-NEXT:    movdqa %xmm10, %xmm8
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3],xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE-NEXT:    pxor %xmm0, %xmm0
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
-; SSE-NEXT:    psraw $8, %xmm11
-; SSE-NEXT:    movdqa %xmm11, %xmm9
-; SSE-NEXT:    pmulhw %xmm8, %xmm9
-; SSE-NEXT:    pmullw %xmm8, %xmm11
-; SSE-NEXT:    movdqa %xmm11, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3]
-; SSE-NEXT:    movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
-; SSE-NEXT:    pxor %xmm5, %xmm5
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm12 = xmm12[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    psraw $8, %xmm12
-; SSE-NEXT:    movdqa %xmm12, %xmm8
-; SSE-NEXT:    pmulhw %xmm10, %xmm8
-; SSE-NEXT:    pmullw %xmm10, %xmm12
-; SSE-NEXT:    movdqa %xmm12, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
-; SSE-NEXT:    movdqa {{.*#+}} xmm8 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    pshufb %xmm8, %xmm13
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm13 = xmm13[0],xmm0[0]
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm2
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE-NEXT:    movdqa %xmm13, %xmm9
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3],xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psraw $8, %xmm10
-; SSE-NEXT:    movdqa %xmm10, %xmm0
-; SSE-NEXT:    pmulhw %xmm9, %xmm0
-; SSE-NEXT:    pmullw %xmm9, %xmm10
-; SSE-NEXT:    movdqa %xmm10, %xmm9
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE-NEXT:    paddd %xmm6, %xmm10
-; SSE-NEXT:    packssdw %xmm10, %xmm9
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
-; SSE-NEXT:    pxor %xmm5, %xmm5
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
-; SSE-NEXT:    psraw $8, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    pmulhw %xmm13, %xmm0
-; SSE-NEXT:    pmullw %xmm13, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm10
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE-NEXT:    paddd %xmm1, %xmm6
-; SSE-NEXT:    packssdw %xmm6, %xmm10
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm1
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm2
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    movdqa (%rsp), %xmm1 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm1
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT:    movdqa %xmm2, %xmm0
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE-NEXT:    movdqa %xmm1, %xmm5
-; SSE-NEXT:    psraw $8, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm1
-; SSE-NEXT:    pmulhw %xmm0, %xmm1
-; SSE-NEXT:    pmullw %xmm0, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm13
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE-NEXT:    paddd %xmm14, %xmm6
-; SSE-NEXT:    packssdw %xmm6, %xmm13
-; SSE-NEXT:    pxor %xmm14, %xmm14
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm14[8],xmm2[9],xmm14[9],xmm2[10],xmm14[10],xmm2[11],xmm14[11],xmm2[12],xmm14[12],xmm2[13],xmm14[13],xmm2[14],xmm14[14],xmm2[15],xmm14[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm6
-; SSE-NEXT:    pmulhw %xmm2, %xmm6
-; SSE-NEXT:    pmullw %xmm2, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; SSE-NEXT:    paddd %xmm3, %xmm1
-; SSE-NEXT:    packssdw %xmm1, %xmm2
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    pshufb %xmm8, %xmm15
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm0[0]
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm1
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE-NEXT:    movdqa %xmm15, %xmm1
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE-NEXT:    movdqa %xmm0, %xmm5
-; SSE-NEXT:    psraw $8, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm0
-; SSE-NEXT:    pmulhw %xmm1, %xmm0
-; SSE-NEXT:    pmullw %xmm1, %xmm6
-; SSE-NEXT:    movdqa %xmm6, %xmm3
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE-NEXT:    paddd %xmm4, %xmm6
-; SSE-NEXT:    packssdw %xmm6, %xmm3
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
-; SSE-NEXT:    psraw $8, %xmm0
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    pmulhw %xmm15, %xmm1
-; SSE-NEXT:    pmullw %xmm15, %xmm0
-; SSE-NEXT:    movdqa %xmm0, %xmm4
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE-NEXT:    paddd %xmm7, %xmm0
-; SSE-NEXT:    packssdw %xmm0, %xmm4
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm0
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm1
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    movdqa %xmm1, %xmm6
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE-NEXT:    pxor %xmm14, %xmm14
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm1
-; SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT:    pshufb %xmm8, %xmm5
-; SSE-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm1[0]
-; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT:    movdqa %xmm5, %xmm7
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm5
-; SSE-NEXT:    pmulhw %xmm0, %xmm5
-; SSE-NEXT:    pmullw %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT:    paddd %xmm11, %xmm1
-; SSE-NEXT:    packssdw %xmm1, %xmm0
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm5
-; SSE-NEXT:    pmulhw %xmm6, %xmm5
-; SSE-NEXT:    pmullw %xmm6, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm6
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; SSE-NEXT:    paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE-NEXT:    paddd %xmm12, %xmm1
-; SSE-NEXT:    packssdw %xmm1, %xmm6
-; SSE-NEXT:    movdqa %xmm6, 112(%rdi)
-; SSE-NEXT:    movdqa %xmm0, 96(%rdi)
-; SSE-NEXT:    movdqa %xmm4, 80(%rdi)
+; SSE-NEXT:    pmaddubsw (%rsi), %xmm4
+; SSE-NEXT:    pmaddubsw 16(%rsi), %xmm5
+; SSE-NEXT:    pmaddubsw 32(%rsi), %xmm6
+; SSE-NEXT:    pmaddubsw 48(%rsi), %xmm7
+; SSE-NEXT:    pmaddubsw 64(%rsi), %xmm3
+; SSE-NEXT:    pmaddubsw 80(%rsi), %xmm2
+; SSE-NEXT:    pmaddubsw 96(%rsi), %xmm1
+; SSE-NEXT:    pmaddubsw 112(%rsi), %xmm0
+; SSE-NEXT:    movdqa %xmm0, 112(%rdi)
+; SSE-NEXT:    movdqa %xmm1, 96(%rdi)
+; SSE-NEXT:    movdqa %xmm2, 80(%rdi)
 ; SSE-NEXT:    movdqa %xmm3, 64(%rdi)
-; SSE-NEXT:    movdqa %xmm2, 48(%rdi)
-; SSE-NEXT:    movdqa %xmm13, 32(%rdi)
-; SSE-NEXT:    movdqa %xmm10, 16(%rdi)
-; SSE-NEXT:    movdqa %xmm9, (%rdi)
+; SSE-NEXT:    movdqa %xmm7, 48(%rdi)
+; SSE-NEXT:    movdqa %xmm6, 32(%rdi)
+; SSE-NEXT:    movdqa %xmm5, 16(%rdi)
+; SSE-NEXT:    movdqa %xmm4, (%rdi)
 ; SSE-NEXT:    movq %rdi, %rax
-; SSE-NEXT:    addq $232, %rsp
-; SSE-NEXT:    .cfi_def_cfa_offset 8
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: pmaddubsw_512:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    subq $312, %rsp # imm = 0x138
-; AVX1-NEXT:    .cfi_def_cfa_offset 320
-; AVX1-NEXT:    vmovdqa 64(%rdi), %ymm4
-; AVX1-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovdqa 96(%rdi), %ymm5
-; AVX1-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovdqa 64(%rsi), %ymm10
-; AVX1-NEXT:    vmovdqa 96(%rsi), %ymm14
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,10,12,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm0
-; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,2,4,6,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm7
-; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm5
-; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm6
-; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm9
-; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm8
-; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm11
-; AVX1-NEXT:    vpshufb %xmm1, %xmm14, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm14, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm14, %xmm4
-; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm5, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm6, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vpshufb %xmm1, %xmm10, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm9, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm10, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm8, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm1, %xmm11, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm10
-; AVX1-NEXT:    vpshufb %xmm1, %xmm10, %xmm5
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm11, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm0, %xmm10, %xmm5
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm5, %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa 32(%rdi), %ymm15
-; AVX1-NEXT:    vpshufb %xmm1, %xmm15, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm5
-; AVX1-NEXT:    vmovdqa 32(%rsi), %ymm3
-; AVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm6
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm6, %xmm5, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm15, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm6
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm6, %xmm5, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm15, %xmm11
-; AVX1-NEXT:    vpshufb %xmm1, %xmm11, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm7
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm6
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm6, %xmm7, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm11, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX1-NEXT:    vpshufb %xmm1, %xmm8, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vmovdqa (%rsi), %ymm9
-; AVX1-NEXT:    vpshufb %xmm1, %xmm9, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm8, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm0, %xmm9, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm13
-; AVX1-NEXT:    vpshufb %xmm1, %xmm13, %xmm7
-; AVX1-NEXT:    vpmovsxbd %xmm7, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm12
-; AVX1-NEXT:    vpshufb %xmm1, %xmm12, %xmm1
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpshufb %xmm0, %xmm13, %xmm1
-; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm0 = <9,11,13,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm1
-; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb %xmm0, %xmm14, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = <1,3,5,7,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm1, %xmm14, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm6, %xmm2
-; AVX1-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm7
-; AVX1-NEXT:    vpmovsxbd %xmm7, %xmm7
-; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm4, %xmm7, %xmm4
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm6, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
-; AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm7, %xmm4
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd (%rsp), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm6, %xmm4
-; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
-; AVX1-NEXT:    vpshufb %xmm1, %xmm7, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm4, %xmm4
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm4, %xmm14
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpshufb %xmm0, %xmm2, %xmm4
-; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
-; AVX1-NEXT:    vpshufb %xmm0, %xmm10, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm4, %xmm4
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm2, %xmm7
-; AVX1-NEXT:    vpmovsxbd %xmm7, %xmm7
-; AVX1-NEXT:    vpshufb %xmm1, %xmm10, %xmm6
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm6, %xmm7, %xmm6
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm4, %xmm6, %xmm4
-; AVX1-NEXT:    vpshufb %xmm0, %xmm15, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm0, %xmm3, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm15, %xmm7
-; AVX1-NEXT:    vpmovsxbd %xmm7, %xmm7
-; AVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm3, %xmm7, %xmm3
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm6, %xmm3, %xmm3
-; AVX1-NEXT:    vpshufb %xmm0, %xmm11, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm11, %xmm7
-; AVX1-NEXT:    vpmovsxbd %xmm7, %xmm7
-; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm5
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm5, %xmm7, %xmm5
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm6, %xmm5, %xmm5
-; AVX1-NEXT:    vpshufb %xmm0, %xmm8, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm0, %xmm9, %xmm7
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm7, %xmm6, %xmm6
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm8, %xmm7
-; AVX1-NEXT:    vpmovsxbd %xmm7, %xmm7
-; AVX1-NEXT:    vpshufb %xmm1, %xmm9, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm7, %xmm2
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm6, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm0, %xmm13, %xmm6
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm0, %xmm6, %xmm0
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX1-NEXT:    vpshufb %xmm1, %xmm13, %xmm6
-; AVX1-NEXT:    vpshufb %xmm1, %xmm12, %xmm1
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm1, %xmm6, %xmm1
-; AVX1-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-NEXT:    vpackssdw %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm3, %ymm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm14, %ymm2
-; AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 16-byte Folded Reload
-; AVX1-NEXT:    addq $312, %rsp # imm = 0x138
-; AVX1-NEXT:    .cfi_def_cfa_offset 8
+; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX1-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX1-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX1-NEXT:    vmovdqa 96(%rdi), %ymm8
+; AVX1-NEXT:    vmovdqa (%rsi), %ymm4
+; AVX1-NEXT:    vmovdqa 32(%rsi), %ymm5
+; AVX1-NEXT:    vmovdqa 64(%rsi), %ymm6
+; AVX1-NEXT:    vmovdqa 96(%rsi), %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm7
+; AVX1-NEXT:    vpmaddubsw %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpmaddubsw %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm4
+; AVX1-NEXT:    vpmaddubsw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmaddubsw %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm6, %xmm4
+; AVX1-NEXT:    vpmaddubsw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmaddubsw %xmm2, %xmm6, %xmm2
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm4
+; AVX1-NEXT:    vpmaddubsw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpmaddubsw %xmm8, %xmm9, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: pmaddubsw_512:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    subq $280, %rsp # imm = 0x118
-; AVX2-NEXT:    .cfi_def_cfa_offset 288
-; AVX2-NEXT:    vmovdqa 32(%rdi), %ymm10
-; AVX2-NEXT:    vmovdqa 64(%rdi), %ymm9
-; AVX2-NEXT:    vmovdqa 96(%rdi), %ymm11
-; AVX2-NEXT:    vmovdqa 64(%rsi), %ymm8
-; AVX2-NEXT:    vmovdqa 96(%rsi), %ymm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm5, %xmm11, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm2
-; AVX2-NEXT:    vextracti128 $1, %ymm11, %xmm0
-; AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm3
-; AVX2-NEXT:    vpshufb %xmm5, %xmm9, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm6
-; AVX2-NEXT:    vextracti128 $1, %ymm9, %xmm0
-; AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm7
-; AVX2-NEXT:    vmovdqa %ymm1, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm13 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; AVX2-NEXT:    vpand %ymm1, %ymm13, %ymm13
-; AVX2-NEXT:    vpmulld %ymm13, %ymm2, %ymm2
-; AVX2-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm13 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm13, %ymm13
-; AVX2-NEXT:    vpmulld %ymm13, %ymm3, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm3, %ymm3
-; AVX2-NEXT:    vpmulld %ymm3, %ymm6, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vextracti128 $1, %ymm8, %xmm6
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm3, %ymm3
-; AVX2-NEXT:    vpmulld %ymm3, %ymm7, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vpshufb %xmm5, %xmm10, %xmm3
-; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX2-NEXT:    vmovdqa 32(%rsi), %ymm13
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm7 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm7, %ymm7
-; AVX2-NEXT:    vpmulld %ymm7, %ymm3, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vextracti128 $1, %ymm10, %xmm2
-; AVX2-NEXT:    vpshufb %xmm5, %xmm2, %xmm3
-; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm7
-; AVX2-NEXT:    vextracti128 $1, %ymm13, %xmm8
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm15 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm15, %ymm15
-; AVX2-NEXT:    vpmulld %ymm15, %ymm7, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vmovdqa (%rdi), %ymm15
-; AVX2-NEXT:    vpshufb %xmm5, %xmm15, %xmm7
-; AVX2-NEXT:    vpmovsxbd %xmm7, %ymm0
-; AVX2-NEXT:    vmovdqa (%rsi), %ymm7
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm14 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm14, %ymm14
-; AVX2-NEXT:    vpmulld %ymm14, %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vextracti128 $1, %ymm15, %xmm0
-; AVX2-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
-; AVX2-NEXT:    vpmovsxbd %xmm5, %ymm14
-; AVX2-NEXT:    vextracti128 $1, %ymm7, %xmm3
-; AVX2-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm12 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm12, %ymm12
-; AVX2-NEXT:    vpmulld %ymm12, %ymm14, %ymm3
-; AVX2-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm12 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm12, %xmm11, %xmm3
-; AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-NEXT:    vpshufb %xmm12, %xmm5, %xmm11
-; AVX2-NEXT:    vpshufb %xmm12, %xmm9, %xmm9
-; AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-NEXT:    vpshufb %xmm12, %xmm5, %xmm14
-; AVX2-NEXT:    vpshufb %xmm12, %xmm10, %xmm5
-; AVX2-NEXT:    vpshufb %xmm12, %xmm2, %xmm10
-; AVX2-NEXT:    vpshufb %xmm12, %xmm15, %xmm15
-; AVX2-NEXT:    vpshufb %xmm12, %xmm0, %xmm12
-; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm2
-; AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm3, %ymm3
-; AVX2-NEXT:    vpmulld %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX2-NEXT:    vpsrld $16, %xmm3, %xmm3
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT:    vpsrld $16, %xmm4, %xmm4
-; AVX2-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm3
-; AVX2-NEXT:    vpmovsxbd %xmm11, %ymm4
-; AVX2-NEXT:    vpand %ymm1, %ymm3, %ymm3
-; AVX2-NEXT:    vpmulld %ymm3, %ymm4, %ymm3
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-NEXT:    vpackssdw %ymm3, %ymm2, %ymm3
-; AVX2-NEXT:    vpmovsxbd %xmm9, %ymm2
-; AVX2-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm4, %ymm4
-; AVX2-NEXT:    vpmulld %ymm4, %ymm2, %ymm2
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX2-NEXT:    vpsrld $16, %xmm4, %xmm4
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT:    vpsrld $16, %xmm6, %xmm6
-; AVX2-NEXT:    vinserti128 $1, %xmm6, %ymm4, %ymm4
-; AVX2-NEXT:    vpmovsxbd %xmm14, %ymm6
-; AVX2-NEXT:    vpand %ymm1, %ymm4, %ymm4
-; AVX2-NEXT:    vpmulld %ymm4, %ymm6, %ymm4
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX2-NEXT:    vpackssdw %ymm4, %ymm2, %ymm2
-; AVX2-NEXT:    vpmovsxbd %xmm5, %ymm4
-; AVX2-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm13[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm6, %ymm6
-; AVX2-NEXT:    vpmulld %ymm6, %ymm4, %ymm4
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX2-NEXT:    vpsrld $16, %xmm6, %xmm6
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm6, %ymm0
-; AVX2-NEXT:    vpmovsxbd %xmm10, %ymm6
-; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpmulld %ymm0, %ymm6, %ymm0
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-NEXT:    vpackssdw %ymm0, %ymm4, %ymm4
-; AVX2-NEXT:    vpmovsxbd %xmm15, %ymm0
-; AVX2-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm7[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
-; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
-; AVX2-NEXT:    vpand %ymm1, %ymm6, %ymm6
-; AVX2-NEXT:    vpmulld %ymm6, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX2-NEXT:    vpsrld $16, %xmm6, %xmm6
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT:    vpsrld $16, %xmm5, %xmm5
-; AVX2-NEXT:    vinserti128 $1, %xmm5, %ymm6, %ymm5
-; AVX2-NEXT:    vpand %ymm1, %ymm5, %ymm1
-; AVX2-NEXT:    vpmovsxbd %xmm12, %ymm5
-; AVX2-NEXT:    vpmulld %ymm1, %ymm5, %ymm1
-; AVX2-NEXT:    vpaddd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm4[0,2,1,3]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
-; AVX2-NEXT:    addq $280, %rsp # imm = 0x118
-; AVX2-NEXT:    .cfi_def_cfa_offset 8
+; AVX2-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX2-NEXT:    vmovdqa 32(%rsi), %ymm1
+; AVX2-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX2-NEXT:    vmovdqa 96(%rsi), %ymm3
+; AVX2-NEXT:    vpmaddubsw (%rdi), %ymm0, %ymm0
+; AVX2-NEXT:    vpmaddubsw 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT:    vpmaddubsw 64(%rdi), %ymm2, %ymm2
+; AVX2-NEXT:    vpmaddubsw 96(%rdi), %ymm3, %ymm3
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: pmaddubsw_512:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $56, %rsp
-; AVX512F-NEXT:    .cfi_def_cfa_offset 64
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm5
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm7
-; AVX512F-NEXT:    vmovdqa 96(%rdi), %ymm8
-; AVX512F-NEXT:    vmovdqa 64(%rsi), %ymm4
-; AVX512F-NEXT:    vmovdqa 96(%rsi), %ymm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT:    vextracti128 $1, %ymm8, %xmm0
-; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm8, %xmm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm16
-; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm0
-; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm7, %xmm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm17
-; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm0
-; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm5, %xmm1
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm18
-; AVX512F-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
-; AVX512F-NEXT:    vextracti128 $1, %ymm3, %xmm13
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm13, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm19
-; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm9
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm9, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm2, %xmm3
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
-; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vextracti128 $1, %ymm4, %xmm10
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm10, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm4, %xmm3
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm3[0],xmm0[0]
-; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm15
-; AVX512F-NEXT:    vextracti128 $1, %ymm15, %xmm12
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm12, %xmm11
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm15, %xmm3
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm11[0]
-; AVX512F-NEXT:    vmovdqa (%rsi), %ymm14
-; AVX512F-NEXT:    vextracti128 $1, %ymm14, %xmm11
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm11, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm14, %xmm6
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm6[0],xmm0[0]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm8, %xmm4
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm4[0],xmm0[0]
-; AVX512F-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm7[0],xmm4[0]
-; AVX512F-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
-; AVX512F-NEXT:    vpmovzxbd {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 16-byte Folded Reload
-; AVX512F-NEXT:    # zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT:    vpmovzxbd {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 16-byte Folded Reload
-; AVX512F-NEXT:    # zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512F-NEXT:    vpmovsxbd %xmm0, %zmm22
-; AVX512F-NEXT:    vpmovsxbd %xmm4, %zmm21
-; AVX512F-NEXT:    vpmovsxbd %xmm5, %zmm20
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm13, %xmm5
-; AVX512F-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm0, %xmm4
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; AVX512F-NEXT:    vpmovsxbd %xmm4, %zmm13
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm9, %xmm5
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm2, %xmm4
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm10, %xmm5
-; AVX512F-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm0, %xmm2
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm12, %xmm5
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm15, %xmm0
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; AVX512F-NEXT:    vpmulld %zmm7, %zmm16, %zmm5
-; AVX512F-NEXT:    vpmulld %zmm8, %zmm17, %zmm7
-; AVX512F-NEXT:    vpmulld %zmm3, %zmm18, %zmm3
-; AVX512F-NEXT:    vpmulld %zmm1, %zmm19, %zmm1
-; AVX512F-NEXT:    vpmulld %zmm4, %zmm22, %zmm4
-; AVX512F-NEXT:    vpaddd %zmm4, %zmm5, %zmm4
-; AVX512F-NEXT:    vpmulld %zmm2, %zmm21, %zmm2
-; AVX512F-NEXT:    vpaddd %zmm2, %zmm7, %zmm2
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512F-NEXT:    vpmulld %zmm0, %zmm20, %zmm0
-; AVX512F-NEXT:    vpaddd %zmm0, %zmm3, %zmm3
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm11, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm14, %xmm5
-; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm5[0],xmm0[0]
-; AVX512F-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512F-NEXT:    vpmulld %zmm0, %zmm13, %zmm0
-; AVX512F-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
-; AVX512F-NEXT:    vpmovsdw %zmm0, %ymm0
-; AVX512F-NEXT:    vpmovsdw %zmm3, %ymm1
-; AVX512F-NEXT:    vpmovsdw %zmm2, %ymm2
-; AVX512F-NEXT:    vpmovsdw %zmm4, %ymm3
-; AVX512F-NEXT:    addq $56, %rsp
-; AVX512F-NEXT:    .cfi_def_cfa_offset 8
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm1
+; AVX512F-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX512F-NEXT:    vmovdqa 96(%rsi), %ymm3
+; AVX512F-NEXT:    vpmaddubsw (%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vpmaddubsw 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT:    vpmaddubsw 64(%rdi), %ymm2, %ymm2
+; AVX512F-NEXT:    vpmaddubsw 96(%rdi), %ymm3, %ymm3
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: pmaddubsw_512:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    pushq %rbp
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 16
-; AVX512BW-NEXT:    pushq %r15
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 24
-; AVX512BW-NEXT:    pushq %r14
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 32
-; AVX512BW-NEXT:    pushq %rbx
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 40
-; AVX512BW-NEXT:    .cfi_offset %rbx, -40
-; AVX512BW-NEXT:    .cfi_offset %r14, -32
-; AVX512BW-NEXT:    .cfi_offset %r15, -24
-; AVX512BW-NEXT:    .cfi_offset %rbp, -16
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm3
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm11
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 64(%rsi), %zmm1
-; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512BW-NEXT:    vpextrb $14, %xmm5, %r8d
-; AVX512BW-NEXT:    vpextrb $12, %xmm5, %r9d
-; AVX512BW-NEXT:    vpextrb $10, %xmm5, %r10d
-; AVX512BW-NEXT:    vpextrb $8, %xmm5, %r11d
-; AVX512BW-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512BW-NEXT:    vpextrb $4, %xmm5, %eax
-; AVX512BW-NEXT:    vpextrb $2, %xmm5, %ecx
-; AVX512BW-NEXT:    vpextrb $0, %xmm5, %edx
-; AVX512BW-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512BW-NEXT:    vpextrb $0, %xmm3, %esi
-; AVX512BW-NEXT:    vmovd %esi, %xmm6
-; AVX512BW-NEXT:    vpextrb $2, %xmm3, %esi
-; AVX512BW-NEXT:    vpinsrb $1, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $4, %xmm3, %esi
-; AVX512BW-NEXT:    vpinsrb $2, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $6, %xmm3, %esi
-; AVX512BW-NEXT:    vpinsrb $3, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $8, %xmm3, %esi
-; AVX512BW-NEXT:    vpinsrb $4, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $10, %xmm3, %esi
-; AVX512BW-NEXT:    vpinsrb $5, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $12, %xmm3, %esi
-; AVX512BW-NEXT:    vpinsrb $6, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $14, %xmm3, %esi
-; AVX512BW-NEXT:    vpinsrb $7, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $0, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $8, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $2, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $9, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $4, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $10, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $6, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $11, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $8, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $12, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $10, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $13, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $12, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $14, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $14, %xmm4, %esi
-; AVX512BW-NEXT:    vpinsrb $15, %esi, %xmm6, %xmm10
-; AVX512BW-NEXT:    vpextrb $0, %xmm2, %esi
-; AVX512BW-NEXT:    vmovd %esi, %xmm6
-; AVX512BW-NEXT:    vpextrb $2, %xmm2, %esi
-; AVX512BW-NEXT:    vpinsrb $1, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $4, %xmm2, %esi
-; AVX512BW-NEXT:    vpinsrb $2, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $6, %xmm2, %esi
-; AVX512BW-NEXT:    vpinsrb $3, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $8, %xmm2, %esi
-; AVX512BW-NEXT:    vpinsrb $4, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $10, %xmm2, %esi
-; AVX512BW-NEXT:    vpinsrb $5, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $12, %xmm2, %esi
-; AVX512BW-NEXT:    vpinsrb $6, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $14, %xmm2, %esi
-; AVX512BW-NEXT:    vpinsrb $7, %esi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $15, %xmm5, %r14d
-; AVX512BW-NEXT:    vpinsrb $8, %edx, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $13, %xmm5, %r15d
-; AVX512BW-NEXT:    vpinsrb $9, %ecx, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $11, %xmm5, %ecx
-; AVX512BW-NEXT:    vpinsrb $10, %eax, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $9, %xmm5, %eax
-; AVX512BW-NEXT:    vpinsrb $11, %edi, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512BW-NEXT:    vpinsrb $12, %r11d, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $5, %xmm5, %esi
-; AVX512BW-NEXT:    vpinsrb $13, %r10d, %xmm6, %xmm6
-; AVX512BW-NEXT:    vpextrb $3, %xmm5, %ebx
-; AVX512BW-NEXT:    vpinsrb $14, %r9d, %xmm6, %xmm0
-; AVX512BW-NEXT:    vpextrb $1, %xmm5, %ebp
-; AVX512BW-NEXT:    vextracti128 $1, %ymm11, %xmm6
-; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm7
-; AVX512BW-NEXT:    vpinsrb $15, %r8d, %xmm0, %xmm9
-; AVX512BW-NEXT:    vpextrb $0, %xmm1, %edx
-; AVX512BW-NEXT:    vmovd %edx, %xmm0
-; AVX512BW-NEXT:    vpextrb $2, %xmm1, %edx
-; AVX512BW-NEXT:    vpinsrb $1, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $4, %xmm1, %edx
-; AVX512BW-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $6, %xmm1, %edx
-; AVX512BW-NEXT:    vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $8, %xmm1, %edx
-; AVX512BW-NEXT:    vpinsrb $4, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $10, %xmm1, %edx
-; AVX512BW-NEXT:    vpinsrb $5, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $12, %xmm1, %edx
-; AVX512BW-NEXT:    vpinsrb $6, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $14, %xmm1, %edx
-; AVX512BW-NEXT:    vpinsrb $7, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $0, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $8, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $2, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $9, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $4, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $10, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $6, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $11, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $8, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $12, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $10, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $12, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $14, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $14, %xmm7, %edx
-; AVX512BW-NEXT:    vpinsrb $15, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-NEXT:    vpextrb $0, %xmm11, %edx
-; AVX512BW-NEXT:    vmovd %edx, %xmm0
-; AVX512BW-NEXT:    vpextrb $2, %xmm11, %edx
-; AVX512BW-NEXT:    vpinsrb $1, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $4, %xmm11, %edx
-; AVX512BW-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $6, %xmm11, %edx
-; AVX512BW-NEXT:    vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $8, %xmm11, %edx
-; AVX512BW-NEXT:    vpinsrb $4, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $10, %xmm11, %edx
-; AVX512BW-NEXT:    vpinsrb $5, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $12, %xmm11, %edx
-; AVX512BW-NEXT:    vpinsrb $6, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $14, %xmm11, %edx
-; AVX512BW-NEXT:    vpinsrb $7, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $0, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $8, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $2, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $9, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $4, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $10, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $6, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $11, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $8, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $12, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $10, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $12, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $14, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $14, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $15, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-NEXT:    vpextrb $1, %xmm3, %edx
-; AVX512BW-NEXT:    vmovd %edx, %xmm0
-; AVX512BW-NEXT:    vpextrb $3, %xmm3, %edx
-; AVX512BW-NEXT:    vpinsrb $1, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $5, %xmm3, %edx
-; AVX512BW-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $7, %xmm3, %edx
-; AVX512BW-NEXT:    vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $9, %xmm3, %edx
-; AVX512BW-NEXT:    vpinsrb $4, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $11, %xmm3, %edx
-; AVX512BW-NEXT:    vpinsrb $5, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $13, %xmm3, %edx
-; AVX512BW-NEXT:    vpinsrb $6, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $15, %xmm3, %edx
-; AVX512BW-NEXT:    vpinsrb $7, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $1, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $8, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $3, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $9, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $5, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $10, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $7, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $11, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $9, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $12, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $11, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $13, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $13, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $14, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $15, %xmm4, %edx
-; AVX512BW-NEXT:    vpinsrb $15, %edx, %xmm0, %xmm13
-; AVX512BW-NEXT:    vpextrb $1, %xmm2, %edx
-; AVX512BW-NEXT:    vmovd %edx, %xmm0
-; AVX512BW-NEXT:    vpextrb $3, %xmm2, %edx
-; AVX512BW-NEXT:    vpinsrb $1, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $5, %xmm2, %edx
-; AVX512BW-NEXT:    vpinsrb $2, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $7, %xmm2, %edx
-; AVX512BW-NEXT:    vpinsrb $3, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $9, %xmm2, %edx
-; AVX512BW-NEXT:    vpinsrb $4, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $11, %xmm2, %edx
-; AVX512BW-NEXT:    vpinsrb $5, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $13, %xmm2, %edx
-; AVX512BW-NEXT:    vpinsrb $6, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $15, %xmm2, %edx
-; AVX512BW-NEXT:    vpinsrb $7, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $15, %xmm6, %r8d
-; AVX512BW-NEXT:    vpinsrb $8, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $13, %xmm6, %r9d
-; AVX512BW-NEXT:    vpinsrb $9, %ebx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $11, %xmm6, %ebx
-; AVX512BW-NEXT:    vpinsrb $10, %esi, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $9, %xmm6, %esi
-; AVX512BW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512BW-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $5, %xmm6, %eax
-; AVX512BW-NEXT:    vpinsrb $13, %ecx, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $3, %xmm6, %ecx
-; AVX512BW-NEXT:    vpinsrb $14, %r15d, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $1, %xmm6, %edx
-; AVX512BW-NEXT:    vpinsrb $15, %r14d, %xmm0, %xmm14
-; AVX512BW-NEXT:    vpextrb $1, %xmm1, %ebp
-; AVX512BW-NEXT:    vmovd %ebp, %xmm0
-; AVX512BW-NEXT:    vpextrb $3, %xmm1, %ebp
-; AVX512BW-NEXT:    vpinsrb $1, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $5, %xmm1, %ebp
-; AVX512BW-NEXT:    vpinsrb $2, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $7, %xmm1, %ebp
-; AVX512BW-NEXT:    vpinsrb $3, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $9, %xmm1, %ebp
-; AVX512BW-NEXT:    vpinsrb $4, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $11, %xmm1, %ebp
-; AVX512BW-NEXT:    vpinsrb $5, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $13, %xmm1, %ebp
-; AVX512BW-NEXT:    vpinsrb $6, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $15, %xmm1, %ebp
-; AVX512BW-NEXT:    vpinsrb $7, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $1, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $8, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $3, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $9, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $5, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $10, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $7, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $11, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $9, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $12, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $11, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $13, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $13, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $14, %ebp, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpextrb $15, %xmm7, %ebp
-; AVX512BW-NEXT:    vpinsrb $15, %ebp, %xmm0, %xmm15
-; AVX512BW-NEXT:    vpextrb $1, %xmm11, %ebp
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm3, %ymm3
-; AVX512BW-NEXT:    vextracti128 $1, %ymm3, %xmm5
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm5, %xmm4
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm3, %xmm6
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
-; AVX512BW-NEXT:    vpmovsxbd %xmm4, %zmm16
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm2, %ymm2
-; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm6, %xmm7
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm2, %xmm8
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm8[0],xmm7[0]
-; AVX512BW-NEXT:    vmovd %ebp, %xmm4
-; AVX512BW-NEXT:    vpextrb $3, %xmm11, %ebp
-; AVX512BW-NEXT:    vpinsrb $1, %ebp, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpextrb $5, %xmm11, %ebp
-; AVX512BW-NEXT:    vpinsrb $2, %ebp, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpextrb $7, %xmm11, %ebp
-; AVX512BW-NEXT:    vpinsrb $3, %ebp, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpextrb $9, %xmm11, %ebp
-; AVX512BW-NEXT:    vpinsrb $4, %ebp, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpextrb $11, %xmm11, %ebp
-; AVX512BW-NEXT:    vpinsrb $5, %ebp, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpextrb $13, %xmm11, %ebp
-; AVX512BW-NEXT:    vpinsrb $6, %ebp, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpextrb $15, %xmm11, %ebp
-; AVX512BW-NEXT:    vpinsrb $7, %ebp, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $8, %edx, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $9, %ecx, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $10, %eax, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $11, %edi, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $12, %esi, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $13, %ebx, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $14, %r9d, %xmm4, %xmm4
-; AVX512BW-NEXT:    vpinsrb $15, %r8d, %xmm4, %xmm12
-; AVX512BW-NEXT:    vpmovsxbd %xmm7, %zmm17
-; AVX512BW-NEXT:    vpmovsxbd %xmm10, %zmm18
-; AVX512BW-NEXT:    vpmovsxbd %xmm9, %zmm20
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm1, %ymm1
-; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm7
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm7, %xmm8
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm1, %xmm10
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm10[0],xmm8[0]
-; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} zmm10 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero,xmm4[4],zero,zero,zero,xmm4[5],zero,zero,zero,xmm4[6],zero,zero,zero,xmm4[7],zero,zero,zero,xmm4[8],zero,zero,zero,xmm4[9],zero,zero,zero,xmm4[10],zero,zero,zero,xmm4[11],zero,zero,zero,xmm4[12],zero,zero,zero,xmm4[13],zero,zero,zero,xmm4[14],zero,zero,zero,xmm4[15],zero,zero,zero
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm11, %ymm8
-; AVX512BW-NEXT:    vextracti128 $1, %ymm8, %xmm4
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm4, %xmm11
-; AVX512BW-NEXT:    vpshufb %xmm0, %xmm8, %xmm0
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm11[0]
-; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512BW-NEXT:    vpmovzxbd {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 16-byte Folded Reload
-; AVX512BW-NEXT:    # zmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512BW-NEXT:    vpmovzxbd {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 16-byte Folded Reload
-; AVX512BW-NEXT:    # zmm19 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm6, %xmm5
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm7, %xmm5
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; AVX512BW-NEXT:    vpmulld %zmm10, %zmm16, %zmm5
-; AVX512BW-NEXT:    vpmulld %zmm0, %zmm17, %zmm0
-; AVX512BW-NEXT:    vpmulld %zmm11, %zmm18, %zmm6
-; AVX512BW-NEXT:    vpmulld %zmm19, %zmm20, %zmm7
-; AVX512BW-NEXT:    vpmovsxbd %xmm3, %zmm3
-; AVX512BW-NEXT:    vpmovsxbd %xmm2, %zmm2
-; AVX512BW-NEXT:    vpmovsxbd %xmm13, %zmm10
-; AVX512BW-NEXT:    vpmovsxbd %xmm14, %zmm11
-; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512BW-NEXT:    vpmulld %zmm1, %zmm3, %zmm1
-; AVX512BW-NEXT:    vpaddd %zmm1, %zmm5, %zmm1
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm4, %xmm3
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm8, %xmm4
-; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero,xmm3[8],zero,zero,zero,xmm3[9],zero,zero,zero,xmm3[10],zero,zero,zero,xmm3[11],zero,zero,zero,xmm3[12],zero,zero,zero,xmm3[13],zero,zero,zero,xmm3[14],zero,zero,zero,xmm3[15],zero,zero,zero
-; AVX512BW-NEXT:    vpmulld %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} zmm2 = xmm15[0],zero,zero,zero,xmm15[1],zero,zero,zero,xmm15[2],zero,zero,zero,xmm15[3],zero,zero,zero,xmm15[4],zero,zero,zero,xmm15[5],zero,zero,zero,xmm15[6],zero,zero,zero,xmm15[7],zero,zero,zero,xmm15[8],zero,zero,zero,xmm15[9],zero,zero,zero,xmm15[10],zero,zero,zero,xmm15[11],zero,zero,zero,xmm15[12],zero,zero,zero,xmm15[13],zero,zero,zero,xmm15[14],zero,zero,zero,xmm15[15],zero,zero,zero
-; AVX512BW-NEXT:    vpmulld %zmm2, %zmm10, %zmm2
-; AVX512BW-NEXT:    vpaddd %zmm2, %zmm6, %zmm2
-; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} zmm3 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero,xmm12[4],zero,zero,zero,xmm12[5],zero,zero,zero,xmm12[6],zero,zero,zero,xmm12[7],zero,zero,zero,xmm12[8],zero,zero,zero,xmm12[9],zero,zero,zero,xmm12[10],zero,zero,zero,xmm12[11],zero,zero,zero,xmm12[12],zero,zero,zero,xmm12[13],zero,zero,zero,xmm12[14],zero,zero,zero,xmm12[15],zero,zero,zero
-; AVX512BW-NEXT:    vpmulld %zmm3, %zmm11, %zmm3
-; AVX512BW-NEXT:    vpaddd %zmm3, %zmm7, %zmm3
-; AVX512BW-NEXT:    vpmovsdw %zmm0, %ymm0
-; AVX512BW-NEXT:    vpmovsdw %zmm3, %ymm3
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpmovsdw %zmm1, %ymm1
-; AVX512BW-NEXT:    vpmovsdw %zmm2, %ymm2
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512BW-NEXT:    popq %rbx
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 32
-; AVX512BW-NEXT:    popq %r14
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 24
-; AVX512BW-NEXT:    popq %r15
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 16
-; AVX512BW-NEXT:    popq %rbp
-; AVX512BW-NEXT:    .cfi_def_cfa_offset 8
+; AVX512BW-NEXT:    vpmaddubsw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmaddubsw 64(%rdi), %zmm1, %zmm1
 ; AVX512BW-NEXT:    retq
   %A = load <128 x i8>, <128 x i8>* %Aptr
   %B = load <128 x i8>, <128 x i8>* %Bptr
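
For reference, each `vpmaddubsw` in the new output computes, per 16-bit result lane, a saturating dot product of an unsigned byte pair with a signed byte pair. A scalar C model of a single lane (a minimal sketch; the helper name `pmaddubsw_lane` and the `int32_t` accumulator are illustrative, not part of this patch):

```
#include <stdint.h>

// One 16-bit lane of PMADDUBSW: bytes from the first source are treated as
// unsigned and bytes from the second as signed; the two 16-bit products are
// summed and the sum saturates to [-32768, 32767].
static int16_t pmaddubsw_lane(uint8_t u0, uint8_t u1, int8_t s0, int8_t s1) {
  int32_t sum = (int32_t)u0 * s0 + (int32_t)u1 * s1;
  if (sum > INT16_MAX) return INT16_MAX;
  if (sum < INT16_MIN) return INT16_MIN;
  return (int16_t)sum;
}
```

The deleted expansions above were open-coding exactly this: shuffle out even/odd bytes, sign/zero extend to 32 bits, multiply, add, then saturate via a signed pack.
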
@@ -1591,113 +203,15 @@ define <64 x i16> @pmaddubsw_512(<128 x
 define <8 x i16> @pmaddubsw_swapped_indices(<16 x i8>* %Aptr, <16 x i8>* %Bptr) {
 ; SSE-LABEL: pmaddubsw_swapped_indices:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm1
 ; SSE-NEXT:    movdqa (%rsi), %xmm0
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pshufb {{.*#+}} xmm2 = xmm2[1],zero,xmm2[2],zero,xmm2[5],zero,xmm2[6],zero,xmm2[9],zero,xmm2[10],zero,xmm2[13],zero,xmm2[14],zero
-; SSE-NEXT:    movdqa %xmm1, %xmm3
-; SSE-NEXT:    pshufb {{.*#+}} xmm3 = xmm3[u,1,u,2,u,5,u,6,u,9,u,10,u,13,u,14]
-; SSE-NEXT:    psraw $8, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm4
-; SSE-NEXT:    pmulhw %xmm2, %xmm4
-; SSE-NEXT:    pmullw %xmm2, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[3],zero,xmm0[4],zero,xmm0[7],zero,xmm0[8],zero,xmm0[11],zero,xmm0[12],zero,xmm0[15],zero
-; SSE-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[u,0,u,3,u,4,u,7,u,8,u,11,u,12,u,15]
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    pmulhw %xmm0, %xmm4
-; SSE-NEXT:    pmullw %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE-NEXT:    paddd %xmm2, %xmm0
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE-NEXT:    paddd %xmm3, %xmm1
-; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    pmaddubsw (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: pmaddubsw_swapped_indices:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <9,10,13,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <1,2,5,6,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm3
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <8,11,12,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,3,4,7,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: pmaddubsw_swapped_indices:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,2,5,6,9,10,13,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX2-NEXT:    vpmulld %ymm2, %ymm3, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,3,4,7,8,11,12,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX2-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: pmaddubsw_swapped_indices:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,2,5,6,9,10,13,14,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX512-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX512-NEXT:    vpmulld %ymm2, %ymm3, %ymm2
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,3,4,7,8,11,12,15,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX512-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX512-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
-; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX-LABEL: pmaddubsw_swapped_indices:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX-NEXT:    vpmaddubsw (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %A = load <16 x i8>, <16 x i8>* %Aptr
   %B = load <16 x i8>, <16 x i8>* %Bptr
   %A_even = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 1, i32 2, i32 5, i32 6, i32 9, i32 10, i32 13, i32 14> ;indices aren't all even
@@ -1722,112 +236,15 @@ define <8 x i16> @pmaddubsw_swapped_indi
 define <8 x i16> @pmaddubsw_swapped_extend(<16 x i8>* %Aptr, <16 x i8>* %Bptr) {
 ; SSE-LABEL: pmaddubsw_swapped_extend:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm1
-; SSE-NEXT:    movdqa (%rsi), %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE-NEXT:    pand %xmm1, %xmm2
-; SSE-NEXT:    movdqa %xmm0, %xmm3
-; SSE-NEXT:    psllw $8, %xmm3
-; SSE-NEXT:    psraw $8, %xmm3
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    pmulhw %xmm3, %xmm4
-; SSE-NEXT:    pmullw %xmm2, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE-NEXT:    psraw $8, %xmm0
-; SSE-NEXT:    psrlw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    pmulhw %xmm0, %xmm4
-; SSE-NEXT:    pmullw %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE-NEXT:    paddd %xmm2, %xmm0
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE-NEXT:    paddd %xmm3, %xmm1
-; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    movdqa (%rdi), %xmm0
+; SSE-NEXT:    pmaddubsw (%rsi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: pmaddubsw_swapped_extend:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <8,10,12,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
-; AVX1-NEXT:    vpmulld %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm3
-; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
-; AVX1-NEXT:    vpmulld %xmm3, %xmm5, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <9,11,13,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = <1,3,5,7,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
-; AVX1-NEXT:    vpmulld %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
-; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: pmaddubsw_swapped_extend:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
-; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpmovsxbd %xmm2, %ymm2
-; AVX2-NEXT:    vpmulld %ymm2, %ymm3, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vpmovsxbd %xmm1, %ymm1
-; AVX2-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: pmaddubsw_swapped_extend:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero,xmm3[4],zero,zero,zero,xmm3[5],zero,zero,zero,xmm3[6],zero,zero,zero,xmm3[7],zero,zero,zero
-; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX512-NEXT:    vpmovsxbd %xmm2, %ymm2
-; AVX512-NEXT:    vpmulld %ymm2, %ymm3, %ymm2
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512-NEXT:    vpmovsxbd %xmm1, %ymm1
-; AVX512-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
-; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX-LABEL: pmaddubsw_swapped_extend:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX-NEXT:    vpmaddubsw (%rsi), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %A = load <16 x i8>, <16 x i8>* %Aptr
   %B = load <16 x i8>, <16 x i8>* %Bptr
   %A_even = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -1852,112 +269,15 @@ define <8 x i16> @pmaddubsw_swapped_exte
 define <8 x i16> @pmaddubsw_commuted_mul(<16 x i8>* %Aptr, <16 x i8>* %Bptr) {
 ; SSE-LABEL: pmaddubsw_commuted_mul:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa (%rdi), %xmm1
 ; SSE-NEXT:    movdqa (%rsi), %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE-NEXT:    pand %xmm0, %xmm2
-; SSE-NEXT:    movdqa %xmm1, %xmm3
-; SSE-NEXT:    psllw $8, %xmm3
-; SSE-NEXT:    psraw $8, %xmm3
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    pmulhw %xmm3, %xmm4
-; SSE-NEXT:    pmullw %xmm2, %xmm3
-; SSE-NEXT:    movdqa %xmm3, %xmm2
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE-NEXT:    psrlw $8, %xmm0
-; SSE-NEXT:    psraw $8, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm4
-; SSE-NEXT:    pmulhw %xmm0, %xmm4
-; SSE-NEXT:    pmullw %xmm0, %xmm1
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE-NEXT:    paddd %xmm2, %xmm0
-; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE-NEXT:    paddd %xmm3, %xmm1
-; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    pmaddubsw (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: pmaddubsw_commuted_mul:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = <8,10,12,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,2,4,6,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm3, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm3
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm5, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = <9,11,13,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = <1,3,5,7,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
-; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm4, %xmm5, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
-; AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpackssdw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: pmaddubsw_commuted_mul:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX2-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX2-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX2-NEXT:    vpmulld %ymm3, %ymm2, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX2-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vzeroupper
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: pmaddubsw_commuted_mul:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX512-NEXT:    vpmovsxbd %xmm3, %ymm3
-; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero
-; AVX512-NEXT:    vpmulld %ymm3, %ymm2, %ymm2
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u>
-; AVX512-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512-NEXT:    vpmovsxbd %xmm0, %ymm0
-; AVX512-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; AVX512-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
-; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
-; AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
-; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX-LABEL: pmaddubsw_commuted_mul:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX-NEXT:    vpmaddubsw (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %A = load <16 x i8>, <16 x i8>* %Aptr
   %B = load <16 x i8>, <16 x i8>* %Bptr
   %A_even = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>