[llvm] r330520 - [X86] Add DAG combine to turn (trunc (srl (mul ext, ext), 16)) into PMULHW/PMULHUW.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 21 11:39:21 PDT 2018
Author: ctopper
Date: Sat Apr 21 11:39:21 2018
New Revision: 330520
URL: http://llvm.org/viewvc/llvm-project?rev=330520&view=rev
Log:
[X86] Add DAG combine to turn (trunc (srl (mul ext, ext), 16)) into PMULHW/PMULHUW.
Ultimately I want to use this to remove the intrinsics for these instructions.
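For illustration, this is the kind of IR the new combine matches, mirroring the updated mulhuw_v8i16 test below (the function name here is just a placeholder; the signed form uses sext and selects PMULHW instead):

    define <8 x i16> @example_mulhuw(<8 x i16> %a, <8 x i16> %b) {
      %a1 = zext <8 x i16> %a to <8 x i32>
      %b1 = zext <8 x i16> %b to <8 x i32>
      %c = mul <8 x i32> %a1, %b1
      %d = lshr <8 x i32> %c, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
      %e = trunc <8 x i32> %d to <8 x i16>
      ret <8 x i16> %e
    }

With SSE2 or later this now selects a single pmulhuw instead of widening to vXi32, multiplying, shifting, and repacking.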
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/pmulh.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=330520&r1=330519&r2=330520&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Apr 21 11:39:21 2018
@@ -36102,6 +36102,59 @@ static SDValue detectAddSubSatPattern(SD
AddSubSatBuilder);
}
+// Try to form a MULHU or MULHS node by looking for
+// (trunc (srl (mul ext, ext), 16))
+// TODO: This is X86 specific because we want to be able to handle wide types
+// before type legalization. But we can only do it if the vector will be
+// legalized via widening/splitting. Type legalization can't handle promotion
+// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
+// combiner.
+static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
+ SelectionDAG &DAG, const X86Subtarget &Subtarget) {
+ // First instruction should be a right shift of a multiply.
+ if (Src.getOpcode() != ISD::SRL ||
+ Src.getOperand(0).getOpcode() != ISD::MUL)
+ return SDValue();
+
+ if (!Subtarget.hasSSE2())
+ return SDValue();
+
+ // Only handle vXi16 types that are at least 128-bits.
+ if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
+ VT.getVectorNumElements() < 8)
+ return SDValue();
+
+ // Input type should be vXi32.
+ EVT InVT = Src.getValueType();
+ if (InVT.getVectorElementType() != MVT::i32)
+ return SDValue();
+
+ // Need a shift by 16.
+ APInt ShiftAmt;
+ if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
+ ShiftAmt != 16)
+ return SDValue();
+
+ SDValue LHS = Src.getOperand(0).getOperand(0);
+ SDValue RHS = Src.getOperand(0).getOperand(1);
+
+ unsigned ExtOpc = LHS.getOpcode();
+ if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
+ RHS.getOpcode() != ExtOpc)
+ return SDValue();
+
+ // Peek through the extends.
+ LHS = LHS.getOperand(0);
+ RHS = RHS.getOperand(0);
+
+ // Ensure the input types match.
+ if (LHS.getValueType() != VT || RHS.getValueType() != VT)
+ return SDValue();
+
+ unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
+ return DAG.getNode(Opc, DL, VT, LHS, RHS);
+}
+
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
@@ -36124,6 +36177,10 @@ static SDValue combineTruncate(SDNode *N
if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
return Val;
+ // Try to combine PMULHUW/PMULHW for vXi16.
+ if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
+ return V;
+
// The bitcast source is a direct mmx result.
// Detect bitcasts between i32 to x86mmx
if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
Modified: llvm/trunk/test/CodeGen/X86/pmulh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmulh.ll?rev=330520&r1=330519&r2=330520&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pmulh.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pmulh.ll Sat Apr 21 11:39:21 2018
@@ -86,55 +86,15 @@ define <4 x i16> @mulhw_v4i16(<4 x i16>
}
define <8 x i16> @mulhuw_v8i16(<8 x i16> %a, <8 x i16> %b) {
-; SSE2-LABEL: mulhuw_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmulhuw %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhuw_v8i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pmulld %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pmulld %xmm3, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: packusdw %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX2-LABEL: mulhuw_v8i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mulhuw_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; SSE-LABEL: mulhuw_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhuw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: mulhuw_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
%a1 = zext <8 x i16> %a to <8 x i32>
%b1 = zext <8 x i16> %b to <8 x i32>
%c = mul <8 x i32> %a1, %b1
@@ -144,55 +104,15 @@ define <8 x i16> @mulhuw_v8i16(<8 x i16>
}
define <8 x i16> @mulhw_v8i16(<8 x i16> %a, <8 x i16> %b) {
-; SSE2-LABEL: mulhw_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmulhw %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhw_v8i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm3
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm0
-; SSE41-NEXT: pmulld %xmm2, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT: pmulld %xmm3, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: packusdw %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX2-LABEL: mulhw_v8i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mulhw_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; SSE-LABEL: mulhw_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: mulhw_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulhw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
%a1 = sext <8 x i16> %a to <8 x i32>
%b1 = sext <8 x i16> %b to <8 x i32>
%c = mul <8 x i32> %a1, %b1
@@ -202,78 +122,16 @@ define <8 x i16> @mulhw_v8i16(<8 x i16>
}
define <16 x i16> @mulhuw_v16i16(<16 x i16> %a, <16 x i16> %b) {
-; SSE2-LABEL: mulhuw_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmulhuw %xmm3, %xmm1
-; SSE2-NEXT: pmulhuw %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhuw_v16i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmulld %xmm4, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmulld %xmm5, %xmm3
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: pmulld %xmm6, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: pmulld %xmm7, %xmm2
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: psrld $16, %xmm3
-; SSE41-NEXT: packusdw %xmm3, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: packusdw %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX2-LABEL: mulhuw_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm2, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mulhuw_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: retq
+; SSE-LABEL: mulhuw_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhuw %xmm2, %xmm0
+; SSE-NEXT: pmulhuw %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: mulhuw_v16i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
%a1 = zext <16 x i16> %a to <16 x i32>
%b1 = zext <16 x i16> %b to <16 x i32>
%c = mul <16 x i32> %a1, %b1
@@ -283,78 +141,16 @@ define <16 x i16> @mulhuw_v16i16(<16 x i
}
define <16 x i16> @mulhw_v16i16(<16 x i16> %a, <16 x i16> %b) {
-; SSE2-LABEL: mulhw_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmulhw %xmm3, %xmm1
-; SSE2-NEXT: pmulhw %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm0, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhw_v16i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm5
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm7
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm1
-; SSE41-NEXT: pmulld %xmm4, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm3
-; SSE41-NEXT: pmulld %xmm5, %xmm3
-; SSE41-NEXT: pmovsxwd %xmm2, %xmm0
-; SSE41-NEXT: pmulld %xmm6, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm2, %xmm2
-; SSE41-NEXT: pmulld %xmm7, %xmm2
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: psrld $16, %xmm3
-; SSE41-NEXT: packusdw %xmm3, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: packusdw %xmm2, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX2-LABEL: mulhw_v16i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm2, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mulhw_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: retq
+; SSE-LABEL: mulhw_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhw %xmm2, %xmm0
+; SSE-NEXT: pmulhw %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: mulhw_v16i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulhw %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
%a1 = sext <16 x i16> %a to <16 x i32>
%b1 = sext <16 x i16> %b to <16 x i32>
%c = mul <16 x i32> %a1, %b1
@@ -364,157 +160,29 @@ define <16 x i16> @mulhw_v16i16(<16 x i1
}
define <32 x i16> @mulhuw_v32i16(<32 x i16> %a, <32 x i16> %b) {
-; SSE2-LABEL: mulhuw_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm8
-; SSE2-NEXT: pmulhuw %xmm7, %xmm3
-; SSE2-NEXT: pmulhuw %xmm6, %xmm2
-; SSE2-NEXT: pmulhuw %xmm5, %xmm1
-; SSE2-NEXT: pmulhuw %xmm4, %xmm8
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm5, %xmm0
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: packssdw %xmm6, %xmm5
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: packssdw %xmm1, %xmm6
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: packssdw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhuw_v32i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm8 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm9 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm11 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm12 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm13 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm14 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm15 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; SSE41-NEXT: pmulld %xmm8, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmulld %xmm9, %xmm7
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; SSE41-NEXT: pmulld %xmm10, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmulld %xmm11, %xmm6
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; SSE41-NEXT: pmulld %xmm12, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmulld %xmm13, %xmm5
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE41-NEXT: pmulld %xmm14, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE41-NEXT: pmulld %xmm15, %xmm4
-; SSE41-NEXT: psrld $16, %xmm3
-; SSE41-NEXT: psrld $16, %xmm7
-; SSE41-NEXT: packusdw %xmm7, %xmm3
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: psrld $16, %xmm6
-; SSE41-NEXT: packusdw %xmm6, %xmm2
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: psrld $16, %xmm5
-; SSE41-NEXT: packusdw %xmm5, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: psrld $16, %xmm4
-; SSE41-NEXT: packusdw %xmm4, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: mulhuw_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhuw %xmm4, %xmm0
+; SSE-NEXT: pmulhuw %xmm5, %xmm1
+; SSE-NEXT: pmulhuw %xmm6, %xmm2
+; SSE-NEXT: pmulhuw %xmm7, %xmm3
+; SSE-NEXT: retq
;
; AVX2-LABEL: mulhuw_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpmulld %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm4, %ymm2
-; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vpmulhuw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmulhuw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mulhuw_v32i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512F-NEXT: vpmulld %zmm3, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
-; AVX512F-NEXT: vpmulld %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm1
-; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT: vpmulhuw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmulhuw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mulhuw_v32i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512BW-NEXT: vpmulld %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512BW-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrld $16, %zmm2, %zmm1
-; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmulhuw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%a1 = zext <32 x i16> %a to <32 x i32>
%b1 = zext <32 x i16> %b to <32 x i32>
@@ -525,157 +193,29 @@ define <32 x i16> @mulhuw_v32i16(<32 x i
}
define <32 x i16> @mulhw_v32i16(<32 x i16> %a, <32 x i16> %b) {
-; SSE2-LABEL: mulhw_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm8
-; SSE2-NEXT: pmulhw %xmm7, %xmm3
-; SSE2-NEXT: pmulhw %xmm6, %xmm2
-; SSE2-NEXT: pmulhw %xmm5, %xmm1
-; SSE2-NEXT: pmulhw %xmm4, %xmm8
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm5, %xmm0
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: packssdw %xmm6, %xmm5
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: packssdw %xmm1, %xmm6
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: packssdw %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhw_v32i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm9
-; SSE41-NEXT: pmovsxwd %xmm2, %xmm10
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm2, %xmm11
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm12
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm13
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm14
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm15
-; SSE41-NEXT: pmovsxwd %xmm7, %xmm3
-; SSE41-NEXT: pmulld %xmm8, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm7
-; SSE41-NEXT: pmulld %xmm9, %xmm7
-; SSE41-NEXT: pmovsxwd %xmm6, %xmm2
-; SSE41-NEXT: pmulld %xmm10, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm6
-; SSE41-NEXT: pmulld %xmm11, %xmm6
-; SSE41-NEXT: pmovsxwd %xmm5, %xmm1
-; SSE41-NEXT: pmulld %xmm12, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm5
-; SSE41-NEXT: pmulld %xmm13, %xmm5
-; SSE41-NEXT: pmovsxwd %xmm4, %xmm0
-; SSE41-NEXT: pmulld %xmm14, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm4, %xmm4
-; SSE41-NEXT: pmulld %xmm15, %xmm4
-; SSE41-NEXT: psrld $16, %xmm3
-; SSE41-NEXT: psrld $16, %xmm7
-; SSE41-NEXT: packusdw %xmm7, %xmm3
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: psrld $16, %xmm6
-; SSE41-NEXT: packusdw %xmm6, %xmm2
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: psrld $16, %xmm5
-; SSE41-NEXT: packusdw %xmm5, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: psrld $16, %xmm4
-; SSE41-NEXT: packusdw %xmm4, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: mulhw_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhw %xmm4, %xmm0
+; SSE-NEXT: pmulhw %xmm5, %xmm1
+; SSE-NEXT: pmulhw %xmm6, %xmm2
+; SSE-NEXT: pmulhw %xmm7, %xmm3
+; SSE-NEXT: retq
;
; AVX2-LABEL: mulhw_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm6
-; AVX2-NEXT: vpmulld %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
-; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm4, %ymm2
-; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vpmulhw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmulhw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mulhw_v32i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3
-; AVX512F-NEXT: vpmulld %zmm3, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512F-NEXT: vpmulld %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm1
-; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT: vpmulhw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmulhw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mulhw_v32i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512BW-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512BW-NEXT: vpmovsxwd %ymm3, %zmm3
-; AVX512BW-NEXT: vpmulld %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512BW-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrld $16, %zmm2, %zmm1
-; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmulhw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%a1 = sext <32 x i16> %a to <32 x i32>
%b1 = sext <32 x i16> %b to <32 x i32>
@@ -686,311 +226,47 @@ define <32 x i16> @mulhw_v32i16(<32 x i1
}
define <64 x i16> @mulhuw_v64i16(<64 x i16> %a, <64 x i16> %b) {
-; SSE2-LABEL: mulhuw_v64i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm7
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm6
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm5
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm4
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm2
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm1
-; SSE2-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm0
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: packssdw %xmm10, %xmm8
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm10
-; SSE2-NEXT: packssdw %xmm0, %xmm10
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: packssdw %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: packssdw %xmm0, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: packssdw %xmm0, %xmm9
-; SSE2-NEXT: movdqa %xmm9, 112(%rdi)
-; SSE2-NEXT: movdqa %xmm5, 96(%rdi)
-; SSE2-NEXT: movdqa %xmm4, 80(%rdi)
-; SSE2-NEXT: movdqa %xmm3, 64(%rdi)
-; SSE2-NEXT: movdqa %xmm2, 48(%rdi)
-; SSE2-NEXT: movdqa %xmm1, 32(%rdi)
-; SSE2-NEXT: movdqa %xmm10, 16(%rdi)
-; SSE2-NEXT: movdqa %xmm8, (%rdi)
-; SSE2-NEXT: movq %rdi, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhuw_v64i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm14 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm8 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero
-; SSE41-NEXT: pmulld %xmm14, %xmm7
-; SSE41-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm11[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm14 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; SSE41-NEXT: pmulld %xmm8, %xmm14
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm11 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero
-; SSE41-NEXT: pmulld %xmm1, %xmm11
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm12[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; SSE41-NEXT: pmulld %xmm6, %xmm5
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm12 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero
-; SSE41-NEXT: pmulld %xmm7, %xmm12
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm8 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE41-NEXT: pmulld %xmm1, %xmm4
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm13 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero
-; SSE41-NEXT: pmulld %xmm6, %xmm13
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm15[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmulld %xmm8, %xmm6
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm15 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
-; SSE41-NEXT: pmulld %xmm7, %xmm15
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm8 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm10[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm10 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmulld %xmm1, %xmm10
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
-; SSE41-NEXT: pmulld %xmm8, %xmm7
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm9[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm9 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE41-NEXT: pmulld %xmm1, %xmm9
-; SSE41-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
-; SSE41-NEXT: pmulld %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE41-NEXT: pmulld %xmm1, %xmm3
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm8 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; SSE41-NEXT: pmulld %xmm8, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm8 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
-; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-NEXT: pmulld %xmm8, %xmm0
-; SSE41-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE41-NEXT: psrld $16, %xmm8
-; SSE41-NEXT: psrld $16, %xmm14
-; SSE41-NEXT: packusdw %xmm14, %xmm8
-; SSE41-NEXT: psrld $16, %xmm11
-; SSE41-NEXT: psrld $16, %xmm5
-; SSE41-NEXT: packusdw %xmm5, %xmm11
-; SSE41-NEXT: psrld $16, %xmm12
-; SSE41-NEXT: psrld $16, %xmm4
-; SSE41-NEXT: packusdw %xmm4, %xmm12
-; SSE41-NEXT: psrld $16, %xmm13
-; SSE41-NEXT: psrld $16, %xmm6
-; SSE41-NEXT: packusdw %xmm6, %xmm13
-; SSE41-NEXT: psrld $16, %xmm15
-; SSE41-NEXT: psrld $16, %xmm10
-; SSE41-NEXT: packusdw %xmm10, %xmm15
-; SSE41-NEXT: psrld $16, %xmm7
-; SSE41-NEXT: psrld $16, %xmm9
-; SSE41-NEXT: packusdw %xmm9, %xmm7
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: psrld $16, %xmm3
-; SSE41-NEXT: packusdw %xmm3, %xmm2
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: packusdw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm8, 112(%rdi)
-; SSE41-NEXT: movdqa %xmm11, 96(%rdi)
-; SSE41-NEXT: movdqa %xmm12, 80(%rdi)
-; SSE41-NEXT: movdqa %xmm13, 64(%rdi)
-; SSE41-NEXT: movdqa %xmm15, 48(%rdi)
-; SSE41-NEXT: movdqa %xmm7, 32(%rdi)
-; SSE41-NEXT: movdqa %xmm2, 16(%rdi)
-; SSE41-NEXT: movdqa %xmm1, (%rdi)
-; SSE41-NEXT: movq %rdi, %rax
-; SSE41-NEXT: retq
+; SSE-LABEL: mulhuw_v64i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm4
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm5
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm6
+; SSE-NEXT: pmulhuw {{[0-9]+}}(%rsp), %xmm7
+; SSE-NEXT: movdqa %xmm7, 112(%rdi)
+; SSE-NEXT: movdqa %xmm6, 96(%rdi)
+; SSE-NEXT: movdqa %xmm5, 80(%rdi)
+; SSE-NEXT: movdqa %xmm4, 64(%rdi)
+; SSE-NEXT: movdqa %xmm3, 48(%rdi)
+; SSE-NEXT: movdqa %xmm2, 32(%rdi)
+; SSE-NEXT: movdqa %xmm1, 16(%rdi)
+; SSE-NEXT: movdqa %xmm0, (%rdi)
+; SSE-NEXT: movq %rdi, %rax
+; SSE-NEXT: retq
;
; AVX2-LABEL: mulhuw_v64i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm8 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm9 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm10 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm11 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm12 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; AVX2-NEXT: vpmulld %ymm12, %ymm8, %ymm8
-; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; AVX2-NEXT: vpmulld %ymm7, %ymm3, %ymm7
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm9, %ymm9
-; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm10, %ymm6
-; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm11, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpmulld %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm8, %ymm8
-; AVX2-NEXT: vpsrld $16, %ymm7, %ymm4
-; AVX2-NEXT: vpsrld $16, %ymm9, %ymm7
-; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX2-NEXT: vpsrld $16, %ymm6, %ymm6
-; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm5, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm6, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm7, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm4, %xmm3
-; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm4
-; AVX2-NEXT: vpackusdw %xmm4, %xmm8, %xmm4
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-NEXT: vpmulhuw %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpmulhuw %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpmulhuw %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpmulhuw %ymm7, %ymm3, %ymm3
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mulhuw_v64i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm7 = ymm7[0],zero,ymm7[1],zero,ymm7[2],zero,ymm7[3],zero,ymm7[4],zero,ymm7[5],zero,ymm7[6],zero,ymm7[7],zero,ymm7[8],zero,ymm7[9],zero,ymm7[10],zero,ymm7[11],zero,ymm7[12],zero,ymm7[13],zero,ymm7[14],zero,ymm7[15],zero
-; AVX512F-NEXT: vpmulld %zmm7, %zmm3, %zmm3
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm6 = ymm6[0],zero,ymm6[1],zero,ymm6[2],zero,ymm6[3],zero,ymm6[4],zero,ymm6[5],zero,ymm6[6],zero,ymm6[7],zero,ymm6[8],zero,ymm6[9],zero,ymm6[10],zero,ymm6[11],zero,ymm6[12],zero,ymm6[13],zero,ymm6[14],zero,ymm6[15],zero
-; AVX512F-NEXT: vpmulld %zmm6, %zmm2, %zmm2
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm5 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
-; AVX512F-NEXT: vpmulld %zmm5, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
-; AVX512F-NEXT: vpmulld %zmm4, %zmm0, %zmm0
-; AVX512F-NEXT: vpsrld $16, %zmm3, %zmm3
-; AVX512F-NEXT: vpsrld $16, %zmm2, %zmm2
-; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm1
-; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
-; AVX512F-NEXT: vpmovdw %zmm3, %ymm3
+; AVX512F-NEXT: vpmulhuw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpmulhuw %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpmulhuw %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpmulhuw %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mulhuw_v64i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm4
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm5
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm5 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm3, %ymm6
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm6 = ymm6[0],zero,ymm6[1],zero,ymm6[2],zero,ymm6[3],zero,ymm6[4],zero,ymm6[5],zero,ymm6[6],zero,ymm6[7],zero,ymm6[8],zero,ymm6[9],zero,ymm6[10],zero,ymm6[11],zero,ymm6[12],zero,ymm6[13],zero,ymm6[14],zero,ymm6[15],zero
-; AVX512BW-NEXT: vpmulld %zmm6, %zmm4, %zmm4
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512BW-NEXT: vpmulld %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512BW-NEXT: vpmulld %zmm3, %zmm5, %zmm3
-; AVX512BW-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
-; AVX512BW-NEXT: vpmulld %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrld $16, %zmm4, %zmm2
-; AVX512BW-NEXT: vpsrld $16, %zmm1, %zmm1
-; AVX512BW-NEXT: vpsrld $16, %zmm3, %zmm3
-; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: vpmovdw %zmm3, %ymm3
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovdw %zmm2, %ymm2
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmulhuw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmulhuw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: retq
%a1 = zext <64 x i16> %a to <64 x i32>
%b1 = zext <64 x i16> %b to <64 x i32>
@@ -1001,311 +277,47 @@ define <64 x i16> @mulhuw_v64i16(<64 x i
}
define <64 x i16> @mulhw_v64i16(<64 x i16> %a, <64 x i16> %b) {
-; SSE2-LABEL: mulhw_v64i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm7
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm6
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm5
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm4
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm3
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm2
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm1
-; SSE2-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm0
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm10
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: packssdw %xmm10, %xmm8
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm10
-; SSE2-NEXT: packssdw %xmm0, %xmm10
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm0, %xmm3
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: packssdw %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: packssdw %xmm0, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: packssdw %xmm0, %xmm9
-; SSE2-NEXT: movdqa %xmm9, 112(%rdi)
-; SSE2-NEXT: movdqa %xmm5, 96(%rdi)
-; SSE2-NEXT: movdqa %xmm4, 80(%rdi)
-; SSE2-NEXT: movdqa %xmm3, 64(%rdi)
-; SSE2-NEXT: movdqa %xmm2, 48(%rdi)
-; SSE2-NEXT: movdqa %xmm1, 32(%rdi)
-; SSE2-NEXT: movdqa %xmm10, 16(%rdi)
-; SSE2-NEXT: movdqa %xmm8, (%rdi)
-; SSE2-NEXT: movq %rdi, %rax
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: mulhw_v64i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
-; SSE41-NEXT: pmovsxwd %xmm7, %xmm14
-; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm7, %xmm8
-; SSE41-NEXT: pmovsxwd %xmm11, %xmm7
-; SSE41-NEXT: pmulld %xmm14, %xmm7
-; SSE41-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE41-NEXT: pmovsxwd %xmm6, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm6, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm11[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm7, %xmm14
-; SSE41-NEXT: pmulld %xmm8, %xmm14
-; SSE41-NEXT: pmovsxwd %xmm12, %xmm11
-; SSE41-NEXT: pmulld %xmm1, %xmm11
-; SSE41-NEXT: pmovsxwd %xmm5, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm12[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm5, %xmm5
-; SSE41-NEXT: pmulld %xmm6, %xmm5
-; SSE41-NEXT: pmovsxwd %xmm13, %xmm12
-; SSE41-NEXT: pmulld %xmm7, %xmm12
-; SSE41-NEXT: pmovsxwd %xmm4, %xmm6
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm4, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm4, %xmm4
-; SSE41-NEXT: pmulld %xmm1, %xmm4
-; SSE41-NEXT: pmovsxwd %xmm15, %xmm13
-; SSE41-NEXT: pmulld %xmm6, %xmm13
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm7
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm15[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm6
-; SSE41-NEXT: pmulld %xmm8, %xmm6
-; SSE41-NEXT: pmovsxwd %xmm10, %xmm15
-; SSE41-NEXT: pmulld %xmm7, %xmm15
-; SSE41-NEXT: pmovsxwd %xmm2, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm10[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm10
-; SSE41-NEXT: pmulld %xmm1, %xmm10
-; SSE41-NEXT: pmovsxwd %xmm9, %xmm7
-; SSE41-NEXT: pmulld %xmm8, %xmm7
-; SSE41-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm9[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm2, %xmm9
-; SSE41-NEXT: pmulld %xmm1, %xmm9
-; SSE41-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm1
-; SSE41-NEXT: pmovsxwd %xmm8, %xmm2
-; SSE41-NEXT: pmulld %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm3, %xmm3
-; SSE41-NEXT: pmulld %xmm1, %xmm3
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm8
-; SSE41-NEXT: pmovsxwd {{[0-9]+}}(%rsp), %xmm1
-; SSE41-NEXT: pmulld %xmm8, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm8
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
-; SSE41-NEXT: pmovsxwd %xmm0, %xmm0
-; SSE41-NEXT: pmulld %xmm8, %xmm0
-; SSE41-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE41-NEXT: psrld $16, %xmm8
-; SSE41-NEXT: psrld $16, %xmm14
-; SSE41-NEXT: packusdw %xmm14, %xmm8
-; SSE41-NEXT: psrld $16, %xmm11
-; SSE41-NEXT: psrld $16, %xmm5
-; SSE41-NEXT: packusdw %xmm5, %xmm11
-; SSE41-NEXT: psrld $16, %xmm12
-; SSE41-NEXT: psrld $16, %xmm4
-; SSE41-NEXT: packusdw %xmm4, %xmm12
-; SSE41-NEXT: psrld $16, %xmm13
-; SSE41-NEXT: psrld $16, %xmm6
-; SSE41-NEXT: packusdw %xmm6, %xmm13
-; SSE41-NEXT: psrld $16, %xmm15
-; SSE41-NEXT: psrld $16, %xmm10
-; SSE41-NEXT: packusdw %xmm10, %xmm15
-; SSE41-NEXT: psrld $16, %xmm7
-; SSE41-NEXT: psrld $16, %xmm9
-; SSE41-NEXT: packusdw %xmm9, %xmm7
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: psrld $16, %xmm3
-; SSE41-NEXT: packusdw %xmm3, %xmm2
-; SSE41-NEXT: psrld $16, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: packusdw %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm8, 112(%rdi)
-; SSE41-NEXT: movdqa %xmm11, 96(%rdi)
-; SSE41-NEXT: movdqa %xmm12, 80(%rdi)
-; SSE41-NEXT: movdqa %xmm13, 64(%rdi)
-; SSE41-NEXT: movdqa %xmm15, 48(%rdi)
-; SSE41-NEXT: movdqa %xmm7, 32(%rdi)
-; SSE41-NEXT: movdqa %xmm2, 16(%rdi)
-; SSE41-NEXT: movdqa %xmm1, (%rdi)
-; SSE41-NEXT: movq %rdi, %rax
-; SSE41-NEXT: retq
+; SSE-LABEL: mulhw_v64i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm4
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm5
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm6
+; SSE-NEXT: pmulhw {{[0-9]+}}(%rsp), %xmm7
+; SSE-NEXT: movdqa %xmm7, 112(%rdi)
+; SSE-NEXT: movdqa %xmm6, 96(%rdi)
+; SSE-NEXT: movdqa %xmm5, 80(%rdi)
+; SSE-NEXT: movdqa %xmm4, 64(%rdi)
+; SSE-NEXT: movdqa %xmm3, 48(%rdi)
+; SSE-NEXT: movdqa %xmm2, 32(%rdi)
+; SSE-NEXT: movdqa %xmm1, 16(%rdi)
+; SSE-NEXT: movdqa %xmm0, (%rdi)
+; SSE-NEXT: movq %rdi, %rax
+; SSE-NEXT: retq
;
; AVX2-LABEL: mulhw_v64i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm8
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm9
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm10
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm11
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxwd %xmm7, %ymm12
-; AVX2-NEXT: vpmulld %ymm12, %ymm8, %ymm8
-; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX2-NEXT: vpmovsxwd %xmm7, %ymm7
-; AVX2-NEXT: vpmulld %ymm7, %ymm3, %ymm7
-; AVX2-NEXT: vpmovsxwd %xmm6, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm9, %ymm9
-; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovsxwd %xmm5, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm10, %ymm6
-; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmovsxwd %xmm4, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm11, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpmulld %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm8, %ymm8
-; AVX2-NEXT: vpsrld $16, %ymm7, %ymm4
-; AVX2-NEXT: vpsrld $16, %ymm9, %ymm7
-; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX2-NEXT: vpsrld $16, %ymm6, %ymm6
-; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm5, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm6, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm7, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX2-NEXT: vpackusdw %xmm3, %xmm4, %xmm3
-; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm4
-; AVX2-NEXT: vpackusdw %xmm4, %xmm8, %xmm4
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
+; AVX2-NEXT: vpmulhw %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpmulhw %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpmulhw %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpmulhw %ymm7, %ymm3, %ymm3
; AVX2-NEXT: retq
;
; AVX512F-LABEL: mulhw_v64i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3
-; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm7
-; AVX512F-NEXT: vpmulld %zmm7, %zmm3, %zmm3
-; AVX512F-NEXT: vpmovsxwd %ymm6, %zmm6
-; AVX512F-NEXT: vpmulld %zmm6, %zmm2, %zmm2
-; AVX512F-NEXT: vpmovsxwd %ymm5, %zmm5
-; AVX512F-NEXT: vpmulld %zmm5, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4
-; AVX512F-NEXT: vpmulld %zmm4, %zmm0, %zmm0
-; AVX512F-NEXT: vpsrld $16, %zmm3, %zmm3
-; AVX512F-NEXT: vpsrld $16, %zmm2, %zmm2
-; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm1
-; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
-; AVX512F-NEXT: vpmovdw %zmm3, %ymm3
+; AVX512F-NEXT: vpmulhw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpmulhw %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpmulhw %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpmulhw %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: mulhw_v64i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm4
-; AVX512BW-NEXT: vpmovsxwd %ymm4, %zmm4
-; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm5
-; AVX512BW-NEXT: vpmovsxwd %ymm5, %zmm5
-; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm3, %ymm6
-; AVX512BW-NEXT: vpmovsxwd %ymm6, %zmm6
-; AVX512BW-NEXT: vpmulld %zmm6, %zmm4, %zmm4
-; AVX512BW-NEXT: vpmovsxwd %ymm3, %zmm3
-; AVX512BW-NEXT: vpmulld %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; AVX512BW-NEXT: vpmovsxwd %ymm3, %zmm3
-; AVX512BW-NEXT: vpmulld %zmm3, %zmm5, %zmm3
-; AVX512BW-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512BW-NEXT: vpmulld %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsrld $16, %zmm4, %zmm2
-; AVX512BW-NEXT: vpsrld $16, %zmm1, %zmm1
-; AVX512BW-NEXT: vpsrld $16, %zmm3, %zmm3
-; AVX512BW-NEXT: vpsrld $16, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: vpmovdw %zmm3, %ymm3
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovdw %zmm2, %ymm2
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmulhw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmulhw %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: retq
%a1 = sext <64 x i16> %a to <64 x i32>
%b1 = sext <64 x i16> %b to <64 x i32>
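
The remaining lines of each test body fall outside the diff context shown above. For readers skimming the test delta, here is a minimal standalone sketch of the IR shape these tests exercise, shown at <8 x i16> for brevity; the function name and width are illustrative, not quoted from pmulh.ll:

define <8 x i16> @mulhw_v8i16_sketch(<8 x i16> %a, <8 x i16> %b) {
  ; Widen both operands so the full 32-bit product is computed.
  %a1 = sext <8 x i16> %a to <8 x i32>
  %b1 = sext <8 x i16> %b to <8 x i32>
  %c = mul <8 x i32> %a1, %b1
  ; Shift the high 16 bits of each product into the low half.
  %d = lshr <8 x i32> %c, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ; Truncating back to i16 completes the chain the new combine
  ; recognizes: it folds to ISD::MULHS, which selects to pmulhw.
  ; With zext in place of sext it forms ISD::MULHU / pmulhuw instead.
  %e = trunc <8 x i32> %d to <8 x i16>
  ret <8 x i16> %e
}

Before this combine, as the removed CHECK lines show, the extend/multiply/shift/truncate sequence was lowered literally (pmovsxwd, pmulld, psrld, pack), so the single-instruction form is a sizable win at every vector width.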