[llvm] 5dea7a8 - Combine to vpdpbusd when the operand is constant and small enough.

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jan 19 19:23:32 PST 2022


Author: Luo, Yuanke
Date: 2022-01-20T11:10:49+08:00
New Revision: 5dea7a865e6f2406fa2b8115cc35833347906826

URL: https://github.com/llvm/llvm-project/commit/5dea7a865e6f2406fa2b8115cc35833347906826
DIFF: https://github.com/llvm/llvm-project/commit/5dea7a865e6f2406fa2b8115cc35833347906826.diff

LOG: Combine to vpdpbusd when the operand is constant and small enough.

Differential Revision: https://reviews.llvm.org/D116363
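
The pattern being combined is a multiply of an i8 vector, extended to i32, by a
constant vector whose lanes fit in 8 bits, feeding an add reduction. A minimal
IR sketch of that shape, adapted from the updated dpbusd_const.ll test below
(the reduction tail is elided from the diff context, so the intrinsic call here
is a reconstruction rather than a line of the diff):

    %0 = zext <4 x i8> %a to <4 x i32>
    %1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 127>
    %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %1)

With this change the constant multiplicand counts as a free truncation, so the
sequence can lower to a single vpdpbusd that loads the constant from memory, as
the updated CHECK lines show.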

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Target/X86/X86PartialReduction.cpp
    llvm/test/CodeGen/X86/dpbusd_const.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index cd8b131c7db4e..d4dafd63cc82d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -9252,8 +9252,13 @@ static bool isFoldableUseOfShuffle(SDNode *N) {
       return true;
     if (Opc == ISD::BITCAST) // Ignore bitcasts
       return isFoldableUseOfShuffle(U);
-    if (N->hasOneUse())
+    if (N->hasOneUse()) {
+      // TODO: There may be a general way to know whether an SDNode can
+      // be folded; for now we only know whether an MI is foldable.
+      if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
+        return false;
       return true;
+    }
   }
   return false;
 }
@@ -41973,17 +41978,14 @@ static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
   if (Op0.getOpcode() == ISD::SIGN_EXTEND)
     std::swap(Op0, Op1);
 
-  if (Op0.getOpcode() != ISD::ZERO_EXTEND)
-    return false;
-
   auto IsFreeTruncation = [](SDValue &Op) -> bool {
     if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::SIGN_EXTEND) &&
         Op.getOperand(0).getScalarValueSizeInBits() <= 8)
       return true;
 
-    // TODO: Support contant value.
-    return false;
+    auto *BV = dyn_cast<BuildVectorSDNode>(Op);
+    return (BV && BV->isConstant());
   };
 
   // (dpbusd (zext a), (sext, b)). Since the first operand should be unsigned

diff --git a/llvm/lib/Target/X86/X86PartialReduction.cpp b/llvm/lib/Target/X86/X86PartialReduction.cpp
index 4e1bb047f2243..4342ac089cae8 100644
--- a/llvm/lib/Target/X86/X86PartialReduction.cpp
+++ b/llvm/lib/Target/X86/X86PartialReduction.cpp
@@ -76,9 +76,6 @@ static bool matchVPDPBUSDPattern(const X86Subtarget *ST, BinaryOperator *Mul,
   if (isa<SExtInst>(LHS))
     std::swap(LHS, RHS);
 
-  if (!isa<ZExtInst>(LHS))
-    return false;
-
   auto IsFreeTruncation = [&](Value *Op) {
     if (auto *Cast = dyn_cast<CastInst>(Op)) {
       if (Cast->getParent() == Mul->getParent() &&
@@ -87,8 +84,8 @@ static bool matchVPDPBUSDPattern(const X86Subtarget *ST, BinaryOperator *Mul,
           Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 8)
         return true;
     }
-    // TODO: Support constant in ISel.
-    return false;
+
+    return isa<Constant>(Op);
   };
 
   // (dpbusd (zext a), (sext, b)). Since the first operand should be unsigned
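
The IR-level matcher now accepts a plain constant on either side of the
multiply. For example, the sext case from the updated dpbusd_const.ll test,
where the constant vector plays the unsigned-byte role of the dot product
(every lane is in [0, 255]):

    %0 = sext <4 x i8> %a to <4 x i32>
    %1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 255>, %0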

diff --git a/llvm/test/CodeGen/X86/dpbusd_const.ll b/llvm/test/CodeGen/X86/dpbusd_const.ll
index d0cefc5ef3b72..aa780fe3b94ad 100644
--- a/llvm/test/CodeGen/X86/dpbusd_const.ll
+++ b/llvm/test/CodeGen/X86/dpbusd_const.ll
@@ -24,17 +24,35 @@ entry:
 }
 
 define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) {
-; ALL-LABEL: mul_4xi8_zc:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; ALL-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    addl %edi, %eax
-; ALL-NEXT:    retq
+; AVXVNNI-LABEL: mul_4xi8_zc:
+; AVXVNNI:       # %bb.0: # %entry
+; AVXVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVXVNNI-NEXT:    {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-NEXT:    vmovd %xmm1, %eax
+; AVXVNNI-NEXT:    addl %edi, %eax
+; AVXVNNI-NEXT:    retq
+;
+; AVX512VNNI-LABEL: mul_4xi8_zc:
+; AVX512VNNI:       # %bb.0: # %entry
+; AVX512VNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX512VNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VNNI-NEXT:    vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512VNNI-NEXT:    vmovd %xmm1, %eax
+; AVX512VNNI-NEXT:    addl %edi, %eax
+; AVX512VNNI-NEXT:    vzeroupper
+; AVX512VNNI-NEXT:    retq
+;
+; AVX512VLVNNI-LABEL: mul_4xi8_zc:
+; AVX512VLVNNI:       # %bb.0: # %entry
+; AVX512VLVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLVNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX512VLVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLVNNI-NEXT:    vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512VLVNNI-NEXT:    vmovd %xmm1, %eax
+; AVX512VLVNNI-NEXT:    addl %edi, %eax
+; AVX512VLVNNI-NEXT:    retq
 entry:
   %0 = zext <4 x i8> %a to <4 x i32>
   %1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 127>
@@ -46,39 +64,38 @@ entry:
 define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) {
 ; AVXVNNI-LABEL: mul_4xi4_cz:
 ; AVXVNNI:       # %bb.0: # %entry
-; AVXVNNI-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
-; AVXVNNI-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT:    vmovd %xmm0, %eax
+; AVXVNNI-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVXVNNI-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVXVNNI-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,127,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVXVNNI-NEXT:    {vex} vpdpbusd %xmm0, %xmm2, %xmm1
+; AVXVNNI-NEXT:    vmovd %xmm1, %eax
 ; AVXVNNI-NEXT:    addl %edi, %eax
 ; AVXVNNI-NEXT:    retq
 ;
 ; AVX512VNNI-LABEL: mul_4xi4_cz:
 ; AVX512VNNI:       # %bb.0: # %entry
-; AVX512VNNI-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
-; AVX512VNNI-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVX512VNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512VNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VNNI-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512VNNI-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX512VNNI-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,127,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512VNNI-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VNNI-NEXT:    vpdpbusd %zmm0, %zmm1, %zmm2
+; AVX512VNNI-NEXT:    vmovd %xmm2, %eax
 ; AVX512VNNI-NEXT:    addl %edi, %eax
+; AVX512VNNI-NEXT:    vzeroupper
 ; AVX512VNNI-NEXT:    retq
 ;
 ; AVX512VLVNNI-LABEL: mul_4xi4_cz:
 ; AVX512VLVNNI:       # %bb.0: # %entry
-; AVX512VLVNNI-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
-; AVX512VLVNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512VLVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512VLVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512VLVNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VLVNNI-NEXT:    vpmovdb %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,127,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512VLVNNI-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLVNNI-NEXT:    vpdpbusd %xmm0, %xmm1, %xmm2
+; AVX512VLVNNI-NEXT:    vmovd %xmm2, %eax
 ; AVX512VLVNNI-NEXT:    addl %edi, %eax
 ; AVX512VLVNNI-NEXT:    retq
 entry:
@@ -90,17 +107,38 @@ entry:
 }
 
 define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) {
-; ALL-LABEL: mul_4xi8_cs:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    vpmovsxbd %xmm0, %xmm0
-; ALL-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; ALL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    addl %edi, %eax
-; ALL-NEXT:    retq
+; AVXVNNI-LABEL: mul_4xi8_cs:
+; AVXVNNI:       # %bb.0: # %entry
+; AVXVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVXVNNI-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVXVNNI-NEXT:    {vex} vpdpbusd %xmm0, %xmm2, %xmm1
+; AVXVNNI-NEXT:    vmovd %xmm1, %eax
+; AVXVNNI-NEXT:    addl %edi, %eax
+; AVXVNNI-NEXT:    retq
+;
+; AVX512VNNI-LABEL: mul_4xi8_cs:
+; AVX512VNNI:       # %bb.0: # %entry
+; AVX512VNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX512VNNI-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512VNNI-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VNNI-NEXT:    vpdpbusd %zmm0, %zmm1, %zmm2
+; AVX512VNNI-NEXT:    vmovd %xmm2, %eax
+; AVX512VNNI-NEXT:    addl %edi, %eax
+; AVX512VNNI-NEXT:    vzeroupper
+; AVX512VNNI-NEXT:    retq
+;
+; AVX512VLVNNI-LABEL: mul_4xi8_cs:
+; AVX512VLVNNI:       # %bb.0: # %entry
+; AVX512VLVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLVNNI-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX512VLVNNI-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512VLVNNI-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLVNNI-NEXT:    vpdpbusd %xmm0, %xmm1, %xmm2
+; AVX512VLVNNI-NEXT:    vmovd %xmm2, %eax
+; AVX512VLVNNI-NEXT:    addl %edi, %eax
+; AVX512VLVNNI-NEXT:    retq
 entry:
   %0 = sext <4 x i8> %a to <4 x i32>
   %1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 255>, %0
@@ -132,33 +170,41 @@ entry:
 define i32 @mul_16xi8_zc(<16 x i8> %a, i32 %c) {
 ; AVXVNNI-LABEL: mul_16xi8_zc:
 ; AVXVNNI:       # %bb.0: # %entry
-; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVXVNNI-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-NEXT:    {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; AVXVNNI-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
 ; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVXVNNI-NEXT:    vmovd %xmm0, %eax
 ; AVXVNNI-NEXT:    addl %edi, %eax
-; AVXVNNI-NEXT:    vzeroupper
 ; AVXVNNI-NEXT:    retq
 ;
-; AVX512-LABEL: mul_16xi8_zc:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT:    vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    addl %edi, %eax
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512VNNI-LABEL: mul_16xi8_zc:
+; AVX512VNNI:       # %bb.0: # %entry
+; AVX512VNNI-NEXT:    vmovdqa %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VNNI-NEXT:    vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; AVX512VNNI-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VNNI-NEXT:    addl %edi, %eax
+; AVX512VNNI-NEXT:    vzeroupper
+; AVX512VNNI-NEXT:    retq
+;
+; AVX512VLVNNI-LABEL: mul_16xi8_zc:
+; AVX512VLVNNI:       # %bb.0: # %entry
+; AVX512VLVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLVNNI-NEXT:    vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; AVX512VLVNNI-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512VLVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VLVNNI-NEXT:    addl %edi, %eax
+; AVX512VLVNNI-NEXT:    retq
 entry:
   %0 = zext <16 x i8> %a to <16 x i32>
   %1 = mul nsw <16 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
@@ -170,12 +216,8 @@ entry:
 define i32 @mul_32xi8_zc(<32 x i8> %a, i32 %c) {
 ; AVXVNNI-LABEL: mul_32xi8_zc:
 ; AVXVNNI:       # %bb.0: # %entry
-; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVXVNNI-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
-; AVXVNNI-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
-; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVXVNNI-NEXT:    {vex} vpdpwssd %ymm2, %ymm0, %ymm1
+; AVXVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-NEXT:    {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
 ; AVXVNNI-NEXT:    vextracti128 $1, %ymm1, %xmm0
 ; AVXVNNI-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
 ; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
@@ -187,27 +229,36 @@ define i32 @mul_32xi8_zc(<32 x i8> %a, i32 %c) {
 ; AVXVNNI-NEXT:    vzeroupper
 ; AVXVNNI-NEXT:    retq
 ;
-; AVX512-LABEL: mul_32xi8_zc:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
-; AVX512-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
-; AVX512-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    addl %edi, %eax
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512VNNI-LABEL: mul_32xi8_zc:
+; AVX512VNNI:       # %bb.0: # %entry
+; AVX512VNNI-NEXT:    vmovdqa %ymm0, %ymm0
+; AVX512VNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VNNI-NEXT:    vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512VNNI-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512VNNI-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512VNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VNNI-NEXT:    addl %edi, %eax
+; AVX512VNNI-NEXT:    vzeroupper
+; AVX512VNNI-NEXT:    retq
+;
+; AVX512VLVNNI-LABEL: mul_32xi8_zc:
+; AVX512VLVNNI:       # %bb.0: # %entry
+; AVX512VLVNNI-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLVNNI-NEXT:    vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512VLVNNI-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512VLVNNI-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVX512VLVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVX512VLVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VLVNNI-NEXT:    vmovd %xmm0, %eax
+; AVX512VLVNNI-NEXT:    addl %edi, %eax
+; AVX512VLVNNI-NEXT:    vzeroupper
+; AVX512VLVNNI-NEXT:    retq
 entry:
   %0 = zext <32 x i8> %a to <32 x i32>
   %1 = mul nsw <32 x i32> %0, <i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64, i32 0, i32 1, i32 2, i32 64>
@@ -219,18 +270,12 @@ entry:
 define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) {
 ; AVXVNNI-LABEL: mul_64xi8_zc:
 ; AVXVNNI:       # %bb.0: # %entry
-; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVXVNNI-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
-; AVXVNNI-NEXT:    vpmaddwd %ymm3, %ymm2, %ymm2
-; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVXVNNI-NEXT:    vpmaddwd %ymm3, %ymm0, %ymm0
-; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVXVNNI-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVXVNNI-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVXVNNI-NEXT:    {vex} vpdpwssd %ymm3, %ymm1, %ymm0
-; AVXVNNI-NEXT:    {vex} vpdpwssd %ymm3, %ymm4, %ymm2
-; AVXVNNI-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVXVNNI-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [1073873152,1073873152,1073873152,1073873152,1073873152,1073873152,1073873152,1073873152]
+; AVXVNNI-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVXVNNI-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVXVNNI-NEXT:    {vex} vpdpbusd %ymm2, %ymm1, %ymm4
+; AVXVNNI-NEXT:    {vex} vpdpbusd %ymm2, %ymm0, %ymm3
+; AVXVNNI-NEXT:    vpaddd %ymm4, %ymm3, %ymm0
 ; AVXVNNI-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVXVNNI-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
@@ -244,23 +289,10 @@ define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) {
 ;
 ; AVX512-LABEL: mul_64xi8_zc:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [18014407099482112,18014407099482112,18014407099482112,18014407099482112]
-; AVX512-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT:    vpmaddwd %ymm2, %ymm3, %ymm3
-; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm3
-; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
-; AVX512-NEXT:    vpmaddwd %ymm2, %ymm4, %ymm4
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT:    vpmaddwd %ymm2, %ymm0, %ymm0
-; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512-NEXT:    vpaddd %ymm4, %ymm1, %ymm1
-; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpaddd %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
+; AVX512-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]

More information about the llvm-commits mailing list