[llvm] [X86] X86TargetLowering::computeKnownBitsForTargetNode - add X86ISD::VPMADD52L/H handling (PR #156349)

via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 14 20:37:21 PDT 2025


https://github.com/houngkoungting updated https://github.com/llvm/llvm-project/pull/156349

>From 1d75814d6a3444c663e5e41e78c39ccda133e020 Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Mon, 1 Sep 2025 23:39:56 +0800
Subject: [PATCH 1/7] [X86] X86TargetLowering::computeKnownBitsForTargetNode -
 add handling for VPMADD52L/VPMADD52H nodes-1

---
 llvm/lib/Target/X86/X86ISelLowering.cpp     |  40 +++++
 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll | 159 ++++++++++++++++++++
 2 files changed, 199 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ab21cf534b304..733651a7adc62 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -38994,9 +38994,49 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       computeKnownBitsForPSADBW(LHS, RHS, Known, DemandedElts, DAG, Depth);
       break;
     }
+    
     }
     break;
   }
+  case X86ISD::VPMADD52L:
+  case X86ISD::VPMADD52H: {
+    EVT VT = Op.getValueType();
+    if (!VT.isVector() || VT.getScalarSizeInBits() != 64) {
+      Known.resetAll();
+      return;
+    }
+
+    const unsigned BW = 64;
+    APInt Low52 = APInt::getLowBitsSet(BW, 52);
+    APInt High12 = APInt::getBitsSetFrom(BW, 52);
+
+    KnownBits K0 =
+        DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+    KnownBits K1 =
+        DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+    KnownBits KAcc =
+        DAG.computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
+
+    if ((K0.Zero & Low52) == Low52 || (K1.Zero & Low52) == Low52) {
+      Known = KAcc;
+      return;
+    }
+
+    KnownBits AddendKB(BW);
+    AddendKB.Zero |= High12;
+
+    KnownBits OutKB =
+        KnownBits::computeForAddSub(true, false, false, KAcc, AddendKB);
+    Known = OutKB;
+
+    if ((KAcc.Zero & Low52) == Low52) {
+      Known.One |= (KAcc.One & High12);
+      Known.Zero |= (KAcc.Zero & High12);
+      Known.Zero &= ~Known.One;
+    }
+
+    return;
+  }
   }
 
   // Handle target shuffles.
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll b/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
new file mode 100644
index 0000000000000..8aefb5b8c373f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma                      | FileCheck %s --check-prefix=AVX
+
+
+define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test4_vpmadd52l_vl256:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test4_vpmadd52l_vl256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495)
+  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495)
+  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %m1, <4 x i64> %m2)
+  ret <4 x i64> %r
+}
+
+
+
+define <2 x i64> @test5_vpmadd52l_oporder(<2 x i64> %acc, <2 x i64> %mulA, <2 x i64> %mulB) {
+; AVX512-LABEL: test5_vpmadd52l_oporder:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test5_vpmadd52l_oporder:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %a = and <2 x i64> %mulA, splat (i64 4503599627370495)
+  %b = and <2 x i64> %mulB, splat (i64 4503599627370495)
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+
+
+
+define <4 x i64> @test6_vpmadd52l_under_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test6_vpmadd52l_under_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
+; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm2, %ymm2
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test6_vpmadd52l_under_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685247,2251799813685247,2251799813685247,2251799813685247]
+; AVX-NEXT:    vpand %ymm3, %ymm1, %ymm1
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685248,2251799813685248,2251799813685248,2251799813685248]
+; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %and = and <4 x i64> %x1, splat (i64 2251799813685247)
+  %or  = or  <4 x i64> %x2, splat (i64 2251799813685248)
+  %r   = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %and, <4 x i64> %or)
+  ret <4 x i64> %r
+}
+
+
+
+define <2 x i64> @test7_vpmadd52h_ok(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test7_vpmadd52h_ok:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test7_vpmadd52h_ok:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %m1 = and <2 x i64> %x1, splat (i64 4503599627370495)
+  %m2 = and <2 x i64> %x2, splat (i64 4503599627370495)
+  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %m1, <2 x i64> %m2)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @test8_vpmadd52h_vl256_misplaced_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
+; AVX512-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
+; AVX-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    {vex} vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %mask = and <4 x i64> %x1, splat (i64 4503599627370495)
+  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %mask, <4 x i64> %x1, <4 x i64> %x2)
+  ret <4 x i64> %r
+}
+
+
+
+define <2 x i64> @test9_vpmadd52l_mix_and_or(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %a = and <2 x i64> %x1, splat (i64 4503599627370495)
+  %b = or  <2 x i64> %x2, splat (i64 0)
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @knownbits_propagate_high_from_acc(<4 x i64> %acc, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: knownbits_propagate_high_from_acc:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: knownbits_propagate_high_from_acc:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %acc_hi = and <4 x i64> %acc,
+            <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
+
+
+  %m1 = and <4 x i64> %x1,
+        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
+  %m2 = and <4 x i64> %x2,
+        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
+
+  %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc_hi, <4 x i64> %m1, <4 x i64> %m2)
+
+
+  %only_high = and <4 x i64> %r,
+               <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
+  ret <4 x i64> %only_high
+}
+
+
+
+
+; ---- intrinsics decls ----
+declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+

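For reference, here is a minimal scalar model of what the two instructions compute per 64-bit lane. This is only an illustration of the semantics the known-bits code above relies on, not code from the patch; it assumes a compiler with unsigned __int128 support:

    #include <cstdint>

    // VPMADD52LUQ: 64-bit wrapping add of the low 52 bits of the 104-bit
    // product of the two low-52-bit multiplicands.
    uint64_t madd52lo(uint64_t acc, uint64_t x, uint64_t y) {
      const uint64_t Mask52 = (1ULL << 52) - 1;
      unsigned __int128 P = (unsigned __int128)(x & Mask52) * (y & Mask52);
      return acc + ((uint64_t)P & Mask52);
    }

    // VPMADD52HUQ: same 104-bit product, but the high 52 bits are added.
    uint64_t madd52hi(uint64_t acc, uint64_t x, uint64_t y) {
      const uint64_t Mask52 = (1ULL << 52) - 1;
      unsigned __int128 P = (unsigned __int128)(x & Mask52) * (y & Mask52);
      return acc + (uint64_t)(P >> 52);
    }
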
>From b3bf19499501c5300d80b1659de3a4501e7e69f1 Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Sat, 6 Sep 2025 16:56:11 +0800
Subject: [PATCH 2/7] [X86] update code + tests

---
 llvm/lib/Target/X86/X86ISelLowering.cpp   |  31 +----
 llvm/test/CodeGen/X86/combine-vpmadd52.ll | 154 +++++++++++++++++++++-
 2 files changed, 157 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 733651a7adc62..1b0b8595b21f9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39005,36 +39005,19 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       Known.resetAll();
       return;
     }
-
-    const unsigned BW = 64;
-    APInt Low52 = APInt::getLowBitsSet(BW, 52);
-    APInt High12 = APInt::getBitsSetFrom(BW, 52);
-
     KnownBits K0 =
         DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     KnownBits K1 =
         DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
     KnownBits KAcc =
         DAG.computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
-
-    if ((K0.Zero & Low52) == Low52 || (K1.Zero & Low52) == Low52) {
-      Known = KAcc;
-      return;
-    }
-
-    KnownBits AddendKB(BW);
-    AddendKB.Zero |= High12;
-
-    KnownBits OutKB =
-        KnownBits::computeForAddSub(true, false, false, KAcc, AddendKB);
-    Known = OutKB;
-
-    if ((KAcc.Zero & Low52) == Low52) {
-      Known.One |= (KAcc.One & High12);
-      Known.Zero |= (KAcc.Zero & High12);
-      Known.Zero &= ~Known.One;
-    }
-
+    K0 = K0.trunc(52);
+    K1 = K1.trunc(52);
+    KnownBits KnownMul = (Op.getOpcode() == X86ISD::VPMADD52L)
+                             ? KnownBits::mul(K0, K1)
+                             : KnownBits::mulhu(K0, K1);
+    KnownMul = KnownMul.zext(64);
+    Known = KnownBits::computeForAddSub(true, false, false, KAcc, KnownMul);
     return;
   }
   }
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52.ll b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
index 004db995ee584..ad392da905ab8 100644
--- a/llvm/test/CodeGen/X86/combine-vpmadd52.ll
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefix=AVX
 
 define <2 x i64> @test1_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
 ; AVX512-LABEL: test1_vpmadd52l:
@@ -102,5 +102,151 @@ define <2 x i64> @test_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
   %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %and, <2 x i64> %or)
   ret <2 x i64> %1
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}
+
+
+define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test4_vpmadd52l_vl256:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test4_vpmadd52l_vl256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1/%x2
+  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %m1, <4 x i64> %m2)
+  ret <4 x i64> %r
+}
+
+define <2 x i64> @test5_vpmadd52l_oporder(<2 x i64> %acc, <2 x i64> %mulA, <2 x i64> %mulB) {
+; AVX512-LABEL: test5_vpmadd52l_oporder:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test5_vpmadd52l_oporder:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of mulA/mulB
+  %a = and <2 x i64> %mulA, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %b = and <2 x i64> %mulB, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @test6_vpmadd52l_under_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test6_vpmadd52l_under_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
+; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm2, %ymm2
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test6_vpmadd52l_under_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685247,2251799813685247,2251799813685247,2251799813685247]
+; AVX-NEXT:    vpand %ymm3, %ymm1, %ymm1
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685248,2251799813685248,2251799813685248,2251799813685248]
+; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  ; keep only low 51 bits of %x1, force bit 51 of %x2 to 1
+  %andv = and <4 x i64> %x1, splat (i64 2251799813685247) ; (1 << 51) - 1
+  %orv  = or  <4 x i64> %x2, splat (i64 2251799813685248) ; 1 << 51
+  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %andv, <4 x i64> %orv)
+  ret <4 x i64> %r
+}
+
+define <2 x i64> @test7_vpmadd52h_ok(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test7_vpmadd52h_ok:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test7_vpmadd52h_ok:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1/%x2
+  %m1 = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %m2 = and <2 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %m1, <2 x i64> %m2)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @test8_vpmadd52h_vl256_misplaced_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
+; AVX512-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
+; AVX-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    {vex} vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1, then place it into %x0 operand position
+  %mask = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %mask, <4 x i64> %x1, <4 x i64> %x2)
+  ret <4 x i64> %r
+}
+
+define <2 x i64> @test9_vpmadd52l_mix_and_or(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1
+  %a = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  ; force high 12 bits of %x2 to 1
+  %b = or  <2 x i64> %x2, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+; Positive test: ensure only the high 12 bits from %acc propagate through VPMADD52L.
+define <4 x i64> @knownbits_propagate_high_from_acc(<4 x i64> %acc, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: knownbits_propagate_high_from_acc:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: knownbits_propagate_high_from_acc:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  ; mask high 12 bits of accumulator (unsigned value 18442240474082181120 == 0xFFF0000000000000)
+  %acc_hi = and <4 x i64> %acc, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
+  ; keep only low 52 bits of multipliers
+  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc_hi, <4 x i64> %m1, <4 x i64> %m2)
+  ; keep only high 12 bits
+  %only_high = and <4 x i64> %r, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
+  ret <4 x i64> %only_high
+}
+
+
+
+declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+
+

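The revised lowering above maps directly onto LLVM's KnownBits helpers: truncate both multiplicands to 52 bits, take mul (low half) or mulhu (high half), zero-extend back to 64 bits, and add the accumulator. A standalone sketch of the same computation, assuming the current KnownBits.h interfaces (the function name is mine):

    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    // K0/K1 are the known bits of the multiplicands, KAcc of the accumulator.
    KnownBits knownMadd52(const KnownBits &K0, const KnownBits &K1,
                          const KnownBits &KAcc, bool IsLowHalf) {
      KnownBits A = K0.trunc(52); // only the low 52 bits feed the multiply
      KnownBits B = K1.trunc(52);
      KnownBits Mul = IsLowHalf ? KnownBits::mul(A, B)    // bits [51:0]
                                : KnownBits::mulhu(A, B); // bits [103:52]
      // Zero-extend the 52-bit addend to 64 bits, then add the accumulator.
      return KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false,
                                         /*NUW=*/false, KAcc, Mul.zext(64));
    }
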
>From 10ecfec1d372568f902a931ea5224c744900301c Mon Sep 17 00:00:00 2001
From: 黃國庭 <we3223 at gmail.com>
Date: Sat, 6 Sep 2025 18:31:36 +0800
Subject: [PATCH 3/7] Delete llvm/test/CodeGen/X86/combine-vpmadd52-1.ll

---
 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll | 159 --------------------
 1 file changed, 159 deletions(-)
 delete mode 100644 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll

diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll b/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
deleted file mode 100644
index 8aefb5b8c373f..0000000000000
--- a/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
+++ /dev/null
@@ -1,159 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma                      | FileCheck %s --check-prefix=AVX
-
-
-define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test4_vpmadd52l_vl256:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test4_vpmadd52l_vl256:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495)
-  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495)
-  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %m1, <4 x i64> %m2)
-  ret <4 x i64> %r
-}
-
-
-
-define <2 x i64> @test5_vpmadd52l_oporder(<2 x i64> %acc, <2 x i64> %mulA, <2 x i64> %mulB) {
-; AVX512-LABEL: test5_vpmadd52l_oporder:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test5_vpmadd52l_oporder:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %a = and <2 x i64> %mulA, splat (i64 4503599627370495)
-  %b = and <2 x i64> %mulB, splat (i64 4503599627370495)
-  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %a, <2 x i64> %b)
-  ret <2 x i64> %r
-}
-
-
-
-define <4 x i64> @test6_vpmadd52l_under_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test6_vpmadd52l_under_mask:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
-; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm2, %ymm2
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test6_vpmadd52l_under_mask:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685247,2251799813685247,2251799813685247,2251799813685247]
-; AVX-NEXT:    vpand %ymm3, %ymm1, %ymm1
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685248,2251799813685248,2251799813685248,2251799813685248]
-; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  %and = and <4 x i64> %x1, splat (i64 2251799813685247)
-  %or  = or  <4 x i64> %x2, splat (i64 2251799813685248)
-  %r   = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %and, <4 x i64> %or)
-  ret <4 x i64> %r
-}
-
-
-
-define <2 x i64> @test7_vpmadd52h_ok(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test7_vpmadd52h_ok:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test7_vpmadd52h_ok:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %m1 = and <2 x i64> %x1, splat (i64 4503599627370495)
-  %m2 = and <2 x i64> %x2, splat (i64 4503599627370495)
-  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %m1, <2 x i64> %m2)
-  ret <2 x i64> %r
-}
-
-define <4 x i64> @test8_vpmadd52h_vl256_misplaced_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
-; AVX512-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
-; AVX-NEXT:    vpand %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    {vex} vpmadd52huq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  %mask = and <4 x i64> %x1, splat (i64 4503599627370495)
-  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %mask, <4 x i64> %x1, <4 x i64> %x2)
-  ret <4 x i64> %r
-}
-
-
-
-define <2 x i64> @test9_vpmadd52l_mix_and_or(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test9_vpmadd52l_mix_and_or:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test9_vpmadd52l_mix_and_or:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %a = and <2 x i64> %x1, splat (i64 4503599627370495)
-  %b = or  <2 x i64> %x2, splat (i64 0)
-  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %a, <2 x i64> %b)
-  ret <2 x i64> %r
-}
-
-define <4 x i64> @knownbits_propagate_high_from_acc(<4 x i64> %acc, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: knownbits_propagate_high_from_acc:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
-; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: knownbits_propagate_high_from_acc:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
-; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX-NEXT:    retq
-  %acc_hi = and <4 x i64> %acc,
-            <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
-
-
-  %m1 = and <4 x i64> %x1,
-        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
-  %m2 = and <4 x i64> %x2,
-        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
-
-  %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc_hi, <4 x i64> %m1, <4 x i64> %m2)
-
-
-  %only_high = and <4 x i64> %r,
-               <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
-  ret <4 x i64> %only_high
-}
-
-
-
-
-; ---- intrinsics decls ----
-declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-

>From 8e41815473c691c26f269e84011e69b1ff16220e Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Mon, 8 Sep 2025 23:16:18 +0800
Subject: [PATCH 4/7] update code-1 and test case -1

---
 llvm/lib/Target/X86/X86ISelLowering.cpp   |  2 +-
 llvm/test/CodeGen/X86/combine-vpmadd52.ll | 10 ++++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1b0b8595b21f9..51f7862dd251d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39017,7 +39017,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                              ? KnownBits::mul(K0, K1)
                              : KnownBits::mulhu(K0, K1);
     KnownMul = KnownMul.zext(64);
-    Known = KnownBits::computeForAddSub(true, false, false, KAcc, KnownMul);
+    Known = KnownBits::add(KAcc, KnownMul);
     return;
   }
   }
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52.ll b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
index ad392da905ab8..132a0643c3426 100644
--- a/llvm/test/CodeGen/X86/combine-vpmadd52.ll
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefixes=CHECK,AVX
+
 
 define <2 x i64> @test1_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
 ; AVX512-LABEL: test1_vpmadd52l:
@@ -104,6 +105,7 @@ define <2 x i64> @test_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
 }
 
 
+
 define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
 ; AVX512-LABEL: test4_vpmadd52l_vl256:
 ; AVX512:       # %bb.0:
@@ -248,5 +250,5 @@ declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i
 declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
 declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
 declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-
-
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}

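The code change in this patch should be purely cosmetic if I read KnownBits.h correctly: KnownBits::add defaults both no-wrap flags to false and forwards to computeForAddSub, so the two spellings agree bit-for-bit. A sketch under that assumption:

    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    KnownBits viaAdd(const KnownBits &L, const KnownBits &R) {
      return KnownBits::add(L, R); // NSW/NUW default to false
    }

    KnownBits viaComputeForAddSub(const KnownBits &L, const KnownBits &R) {
      return KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/false,
                                         /*NUW=*/false, L, R);
    }
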
>From 64c3e95a68a75d7fa859d0e1b2062bb80ca9a11c Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Mon, 8 Sep 2025 23:28:24 +0800
Subject: [PATCH 5/7] update code-2  delete blank

---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 51f7862dd251d..932c84c2a6686 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -38994,7 +38994,6 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       computeKnownBitsForPSADBW(LHS, RHS, Known, DemandedElts, DAG, Depth);
       break;
     }
-    
     }
     break;
   }

>From f8d3ae902cbd87f84db9e1b89e68421489f8229b Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Mon, 15 Sep 2025 11:36:00 +0800
Subject: [PATCH 6/7] [X86] update code + tests

---
 llvm/lib/Target/X86/X86ISelLowering.cpp     |   5 +-
 llvm/test/CodeGen/X86/combine-vpmadd52HL.ll | 135 ++++++++++++++++++++
 2 files changed, 136 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/combine-vpmadd52HL.ll

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 932c84c2a6686..18ab3eb866561 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39000,10 +39000,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
   case X86ISD::VPMADD52L:
   case X86ISD::VPMADD52H: {
     EVT VT = Op.getValueType();
-    if (!VT.isVector() || VT.getScalarSizeInBits() != 64) {
-      Known.resetAll();
-      return;
-    }
+    assert(VT.isVector() && VT.getScalarType() == MVT::i64 && "Unexpected VPMADD52 type");
     KnownBits K0 =
         DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     KnownBits K1 =
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52HL.ll b/llvm/test/CodeGen/X86/combine-vpmadd52HL.ll
new file mode 100644
index 0000000000000..0b5be5fc9900b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52HL.ll
@@ -0,0 +1,135 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=AVX512VL
+
+
+
+; H path: take the high 52 bits of the product and add them to the accumulator
+; 25-bit = (1<<25)-1 = 33554431
+; 26-bit = (1<<26)-1 = 67108863
+
+declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(<8 x i64>, <8 x i64>, <8 x i64>)
+
+define <2 x i64> @kb52h_128_mask25_and1(<2 x i64> %x, <2 x i64> %y) {
+; AVX512VL-LABEL: kb52h_128_mask25_and1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovddup {{.*#+}} xmm0 = [1,1]
+; AVX512VL-NEXT:    # xmm0 = mem[0,0]
+; AVX512VL-NEXT:    retq
+  %mx  = and <2 x i64> %x, <i64 33554431, i64 33554431>
+  %my  = and <2 x i64> %y, <i64 33554431, i64 33554431>
+  %r   = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(
+             <2 x i64> <i64 1, i64 1>,           ; acc
+             <2 x i64> %mx,                      ; x (masked to 25-bit)
+             <2 x i64> %my)                      ; y (masked to 25-bit)
+  %ret = and <2 x i64> %r, <i64 1, i64 1>
+  ret <2 x i64> %ret
+}
+
+define <4 x i64> @kb52h_256_mask25x26_acc1(<4 x i64> %x, <4 x i64> %y) {
+; AVX512VL-LABEL: kb52h_256_mask25x26_acc1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [1,1,1,1]
+; AVX512VL-NEXT:    retq
+  %mx = and <4 x i64> %x, <i64 33554431, i64 33554431, i64 33554431, i64 33554431>
+  %my = and <4 x i64> %y, <i64 67108863, i64 67108863, i64 67108863, i64 67108863>
+  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(
+            <4 x i64> <i64 1, i64 1, i64 1, i64 1>,
+            <4 x i64> %mx,
+            <4 x i64> %my)
+  ret <4 x i64> %r
+}
+
+define <8 x i64> @kb52h_512_mask25_and1(<8 x i64> %x, <8 x i64> %y) {
+; AVX512VL-LABEL: kb52h_512_mask25_and1:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vbroadcastsd {{.*#+}} zmm0 = [1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT:    retq
+  %mx = and <8 x i64> %x, <i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431>
+  %my = and <8 x i64> %y, <i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431, i64 33554431>
+  %r  = call <8 x i64> @llvm.x86.avx512.vpmadd52h.uq.512(
+            <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>,
+            <8 x i64> %mx,
+            <8 x i64> %my)
+  %ret = and <8 x i64> %r, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  ret <8 x i64> %ret
+}
+
+
+; 26-bit =  67108863 = (1<<26)-1
+; 50-bit = 1125899906842623 = (1<<50)-1
+
+declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64>, <8 x i64>, <8 x i64>)
+
+
+
+define <2 x i64> @kb52l_128_mask26x26_add_intrin(<2 x i64> %x, <2 x i64> %y, <2 x i64> %acc) {
+; AVX512VL-LABEL: kb52l_128_mask26x26_add_intrin:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [67108863,67108863]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmadd52luq %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vmovdqa %xmm2, %xmm0
+; AVX512VL-NEXT:    retq
+  %xm = and <2 x i64> %x, <i64 67108863, i64 67108863>
+  %ym = and <2 x i64> %y, <i64 67108863, i64 67108863>
+  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %xm, <2 x i64> %ym)
+  ret <2 x i64> %r
+}
+
+
+
+define <4 x i64> @kb52l_256_mask50x3_add_intrin(<4 x i64> %x, <4 x i64> %y, <4 x i64> %acc) {
+; AVX512VL-LABEL: kb52l_256_mask50x3_add_intrin:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpmadd52luq %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vmovdqa %ymm2, %ymm0
+; AVX512VL-NEXT:    retq
+  %xm = and <4 x i64> %x, <i64 1125899906842623, i64 1125899906842623, i64 1125899906842623, i64 1125899906842623>
+  %ym = and <4 x i64> %y, <i64 3, i64 3, i64 3, i64 3>
+  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc, <4 x i64> %xm, <4 x i64> %ym)
+  ret <4 x i64> %r
+}
+
+
+
+define <8 x i64> @kb52l_512_mask26x26_add_intrin(<8 x i64> %x, <8 x i64> %y, <8 x i64> %acc) {
+; AVX512VL-LABEL: kb52l_512_mask26x26_add_intrin:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [67108863,67108863,67108863,67108863,67108863,67108863,67108863,67108863]
+; AVX512VL-NEXT:    vpandq %zmm3, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpandq %zmm3, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpmadd52luq %zmm1, %zmm0, %zmm2
+; AVX512VL-NEXT:    vmovdqa64 %zmm2, %zmm0
+; AVX512VL-NEXT:    retq
+  %xm = and <8 x i64> %x, <i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863>
+  %ym = and <8 x i64> %y, <i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863, i64 67108863>
+  %r  = call <8 x i64> @llvm.x86.avx512.vpmadd52l.uq.512(<8 x i64> %acc, <8 x i64> %xm, <8 x i64> %ym)
+  ret <8 x i64> %r
+}
+
+
+
+
+define <2 x i64> @kb52l_128_neg_27x27_plain(<2 x i64> %x, <2 x i64> %y, <2 x i64> %acc) {
+; AVX512VL-LABEL: kb52l_128_neg_27x27_plain:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} xmm3 = [67108864,67108864]
+; AVX512VL-NEXT:    vpand %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpand %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+  %xm = and <2 x i64> %x, <i64 67108864, i64 67108864>   ; 1<<26
+  %ym = and <2 x i64> %y, <i64 67108864, i64 67108864>
+  %mul = mul <2 x i64> %xm, %ym
+  %res = add <2 x i64> %mul, %acc
+  ret <2 x i64> %res
+}
+

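Why the high-half tests above fold to the accumulator: an n-bit by m-bit unsigned multiply needs at most n+m bits, so with the inputs masked to 25 and 26 bits the 104-bit product has nothing at or above bit 52, and VPMADD52H adds a known zero. A quick standalone check of that bound (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t Max25 = (1ULL << 25) - 1; // 33554431
      const uint64_t Max26 = (1ULL << 26) - 1; // 67108863
      // Largest 25-bit * 26-bit product fits in 51 bits, so bits [103:52]
      // of the 104-bit product are zero and the H half contributes nothing.
      unsigned __int128 P = (unsigned __int128)Max25 * Max26;
      assert((P >> 52) == 0);
      return 0;
    }
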
>From 51f53bdb988b6ef765887e5b7f931c971da3c367 Mon Sep 17 00:00:00 2001
From: 黃國庭 <we3223 at gmail.com>
Date: Mon, 15 Sep 2025 11:37:09 +0800
Subject: [PATCH 7/7] Delete llvm/test/CodeGen/X86/combine-vpmadd52.ll

---
 llvm/test/CodeGen/X86/combine-vpmadd52.ll | 254 ----------------------
 1 file changed, 254 deletions(-)
 delete mode 100644 llvm/test/CodeGen/X86/combine-vpmadd52.ll

diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52.ll b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
deleted file mode 100644
index 132a0643c3426..0000000000000
--- a/llvm/test/CodeGen/X86/combine-vpmadd52.ll
+++ /dev/null
@@ -1,254 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefixes=CHECK,AVX
-
-
-define <2 x i64> @test1_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test1_vpmadd52l:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test1_vpmadd52l:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-
-  %and = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1LL << 52) - 1
-  %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %and, <2 x i64> %x2)
-  ret <2 x i64> %1
-}
-
-define <2 x i64> @test2_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test2_vpmadd52l:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test2_vpmadd52l:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %and = and <2 x i64> %x2, splat (i64 4503599627370495) ; (1LL << 52) - 1
-  %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %and)
-  ret <2 x i64> %1
-}
-
-define <2 x i64> @test3_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test3_vpmadd52l:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test3_vpmadd52l:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %and = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1LL << 52) - 1
-  %or = or <2 x i64> %x2, splat (i64 4503599627370496) ; 1LL << 52
-  %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %and, <2 x i64> %or)
-  ret <2 x i64> %1
-}
-
-define <2 x i64> @test_vpmadd52l_wrong_bits(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test_vpmadd52l_wrong_bits:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm1
-; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm2, %xmm2
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test_vpmadd52l_wrong_bits:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %and = and <2 x i64> %x1, splat (i64 2251799813685247) ; (1LL << 51) - 1
-  %or = or <2 x i64> %x2, splat (i64 2251799813685248) ; 1LL << 51
-  %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %and, <2 x i64> %or)
-  ret <2 x i64> %1
-}
-
-define <2 x i64> @test_vpmadd52l_wrong_op(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test_vpmadd52l_wrong_op:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm0
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test_vpmadd52l_wrong_op:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %and = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1LL << 52) - 1
-  %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %and, <2 x i64> %x1, <2 x i64> %x2)
-  ret <2 x i64> %1
-}
-
-define <2 x i64> @test_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test_vpmadd52h:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test_vpmadd52h:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-
-  %and = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1LL << 52) - 1
-  %or = or <2 x i64> %x2, splat (i64 4503599627370496) ; 1LL << 52
-  %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %and, <2 x i64> %or)
-  ret <2 x i64> %1
-}
-
-
-
-define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test4_vpmadd52l_vl256:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test4_vpmadd52l_vl256:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  ; keep only low 52 bits of %x1/%x2
-  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %m1, <4 x i64> %m2)
-  ret <4 x i64> %r
-}
-
-define <2 x i64> @test5_vpmadd52l_oporder(<2 x i64> %acc, <2 x i64> %mulA, <2 x i64> %mulB) {
-; AVX512-LABEL: test5_vpmadd52l_oporder:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test5_vpmadd52l_oporder:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  ; keep only low 52 bits of mulA/mulB
-  %a = and <2 x i64> %mulA, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %b = and <2 x i64> %mulB, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %a, <2 x i64> %b)
-  ret <2 x i64> %r
-}
-
-define <4 x i64> @test6_vpmadd52l_under_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test6_vpmadd52l_under_mask:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
-; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm2, %ymm2
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test6_vpmadd52l_under_mask:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685247,2251799813685247,2251799813685247,2251799813685247]
-; AVX-NEXT:    vpand %ymm3, %ymm1, %ymm1
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685248,2251799813685248,2251799813685248,2251799813685248]
-; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  ; keep only low 51 bits of %x1, force bit 51 of %x2 to 1
-  %andv = and <4 x i64> %x1, splat (i64 2251799813685247) ; (1 << 51) - 1
-  %orv  = or  <4 x i64> %x2, splat (i64 2251799813685248) ; 1 << 51
-  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %andv, <4 x i64> %orv)
-  ret <4 x i64> %r
-}
-
-define <2 x i64> @test7_vpmadd52h_ok(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test7_vpmadd52h_ok:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test7_vpmadd52h_ok:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  ; keep only low 52 bits of %x1/%x2
-  %m1 = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %m2 = and <2 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %m1, <2 x i64> %m2)
-  ret <2 x i64> %r
-}
-
-define <4 x i64> @test8_vpmadd52h_vl256_misplaced_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
-; AVX512-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
-; AVX-NEXT:    vpand %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    {vex} vpmadd52huq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  ; keep only low 52 bits of %x1, then place it into %x0 operand position
-  %mask = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %mask, <4 x i64> %x1, <4 x i64> %x2)
-  ret <4 x i64> %r
-}
-
-define <2 x i64> @test9_vpmadd52l_mix_and_or(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test9_vpmadd52l_mix_and_or:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test9_vpmadd52l_mix_and_or:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  ; keep only low 52 bits of %x1
-  %a = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
-  ; force high 12 bits of %x2 to 1
-  %b = or  <2 x i64> %x2, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
-  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %a, <2 x i64> %b)
-  ret <2 x i64> %r
-}
-; Positive test: ensure only the high 12 bits from %acc propagate through VPMADD52L.
-define <4 x i64> @knownbits_propagate_high_from_acc(<4 x i64> %acc, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: knownbits_propagate_high_from_acc:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
-; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: knownbits_propagate_high_from_acc:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
-; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX-NEXT:    retq
-  ; mask high 12 bits of accumulator (unsigned value 18442240474082181120 == 0xFFF0000000000000)
-  %acc_hi = and <4 x i64> %acc, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
-  ; keep only low 52 bits of multipliers
-  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
-  %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc_hi, <4 x i64> %m1, <4 x i64> %m2)
-  ; keep only high 12 bits
-  %only_high = and <4 x i64> %r, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
-  ret <4 x i64> %only_high
-}
-
-
-
-declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}


