[llvm] [X86] X86TargetLowering::computeKnownBitsForTargetNode - add X86ISD::VPMADD52L/H handling (PR #156349)

via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 8 08:26:31 PDT 2025


https://github.com/houngkoungting updated https://github.com/llvm/llvm-project/pull/156349

>From 1d75814d6a3444c663e5e41e78c39ccda133e020 Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Mon, 1 Sep 2025 23:39:56 +0800
Subject: [PATCH 1/4] [X86] X86TargetLowering::computeKnownBitsForTargetNode -
 add handling for VPMADD52L/VPMADD52H nodes-1

---
 llvm/lib/Target/X86/X86ISelLowering.cpp     |  40 +++++
 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll | 159 ++++++++++++++++++++
 2 files changed, 199 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ab21cf534b304..733651a7adc62 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -38994,9 +38994,49 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       computeKnownBitsForPSADBW(LHS, RHS, Known, DemandedElts, DAG, Depth);
       break;
     }
+    
     }
     break;
   }
+  case X86ISD::VPMADD52L:
+  case X86ISD::VPMADD52H: {
+    EVT VT = Op.getValueType();
+    if (!VT.isVector() || VT.getScalarSizeInBits() != 64) {
+      Known.resetAll();
+      return;
+    }
+
+    const unsigned BW = 64;
+    APInt Low52 = APInt::getLowBitsSet(BW, 52);
+    APInt High12 = APInt::getBitsSetFrom(BW, 52);
+
+    KnownBits K0 =
+        DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+    KnownBits K1 =
+        DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+    KnownBits KAcc =
+        DAG.computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
+
+    if ((K0.Zero & Low52) == Low52 || (K1.Zero & Low52) == Low52) {
+      Known = KAcc;
+      return;
+    }
+
+    KnownBits AddendKB(BW);
+    AddendKB.Zero |= High12;
+
+    KnownBits OutKB =
+        KnownBits::computeForAddSub(true, false, false, KAcc, AddendKB);
+    Known = OutKB;
+
+    if ((KAcc.Zero & Low52) == Low52) {
+      Known.One |= (KAcc.One & High12);
+      Known.Zero |= (KAcc.Zero & High12);
+      Known.Zero &= ~Known.One;
+    }
+
+    return;
+  }
   }
 
   // Handle target shuffles.
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll b/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
new file mode 100644
index 0000000000000..8aefb5b8c373f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
@@ -0,0 +1,159 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma                      | FileCheck %s --check-prefix=AVX
+
+
+define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test4_vpmadd52l_vl256:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test4_vpmadd52l_vl256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495)
+  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495)
+  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %m1, <4 x i64> %m2)
+  ret <4 x i64> %r
+}
+
+
+
+define <2 x i64> @test5_vpmadd52l_oporder(<2 x i64> %acc, <2 x i64> %mulA, <2 x i64> %mulB) {
+; AVX512-LABEL: test5_vpmadd52l_oporder:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test5_vpmadd52l_oporder:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %a = and <2 x i64> %mulA, splat (i64 4503599627370495)
+  %b = and <2 x i64> %mulB, splat (i64 4503599627370495)
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+
+
+
+define <4 x i64> @test6_vpmadd52l_under_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test6_vpmadd52l_under_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
+; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm2, %ymm2
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test6_vpmadd52l_under_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685247,2251799813685247,2251799813685247,2251799813685247]
+; AVX-NEXT:    vpand %ymm3, %ymm1, %ymm1
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685248,2251799813685248,2251799813685248,2251799813685248]
+; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %and = and <4 x i64> %x1, splat (i64 2251799813685247)
+  %or  = or  <4 x i64> %x2, splat (i64 2251799813685248)
+  %r   = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %and, <4 x i64> %or)
+  ret <4 x i64> %r
+}
+
+
+
+define <2 x i64> @test7_vpmadd52h_ok(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test7_vpmadd52h_ok:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test7_vpmadd52h_ok:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %m1 = and <2 x i64> %x1, splat (i64 4503599627370495)
+  %m2 = and <2 x i64> %x2, splat (i64 4503599627370495)
+  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %m1, <2 x i64> %m2)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @test8_vpmadd52h_vl256_misplaced_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
+; AVX512-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
+; AVX-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    {vex} vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %mask = and <4 x i64> %x1, splat (i64 4503599627370495)
+  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %mask, <4 x i64> %x1, <4 x i64> %x2)
+  ret <4 x i64> %r
+}
+
+
+
+define <2 x i64> @test9_vpmadd52l_mix_and_or(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %a = and <2 x i64> %x1, splat (i64 4503599627370495)
+  %b = or  <2 x i64> %x2, splat (i64 0)
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @knownbits_propagate_high_from_acc(<4 x i64> %acc, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: knownbits_propagate_high_from_acc:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: knownbits_propagate_high_from_acc:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %acc_hi = and <4 x i64> %acc,
+            <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
+
+
+  %m1 = and <4 x i64> %x1,
+        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
+  %m2 = and <4 x i64> %x2,
+        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
+
+  %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc_hi, <4 x i64> %m1, <4 x i64> %m2)
+
+
+  %only_high = and <4 x i64> %r,
+               <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
+  ret <4 x i64> %only_high
+}
+
+
+
+
+; ---- intrinsics decls ----
+declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+

>From b3bf19499501c5300d80b1659de3a4501e7e69f1 Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Sat, 6 Sep 2025 16:56:11 +0800
Subject: [PATCH 2/4] [X86] update code + tests

---
 llvm/lib/Target/X86/X86ISelLowering.cpp   |  31 +----
 llvm/test/CodeGen/X86/combine-vpmadd52.ll | 154 +++++++++++++++++++++-
 2 files changed, 157 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 733651a7adc62..1b0b8595b21f9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39005,36 +39005,19 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       Known.resetAll();
       return;
     }
-
-    const unsigned BW = 64;
-    APInt Low52 = APInt::getLowBitsSet(BW, 52);
-    APInt High12 = APInt::getBitsSetFrom(BW, 52);
-
     KnownBits K0 =
         DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     KnownBits K1 =
         DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
     KnownBits KAcc =
         DAG.computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
-
-    if ((K0.Zero & Low52) == Low52 || (K1.Zero & Low52) == Low52) {
-      Known = KAcc;
-      return;
-    }
-
-    KnownBits AddendKB(BW);
-    AddendKB.Zero |= High12;
-
-    KnownBits OutKB =
-        KnownBits::computeForAddSub(true, false, false, KAcc, AddendKB);
-    Known = OutKB;
-
-    if ((KAcc.Zero & Low52) == Low52) {
-      Known.One |= (KAcc.One & High12);
-      Known.Zero |= (KAcc.Zero & High12);
-      Known.Zero &= ~Known.One;
-    }
-
+    K0 = K0.trunc(52);
+    K1 = K1.trunc(52);
+    KnownBits KnownMul = (Op.getOpcode() == X86ISD::VPMADD52L)
+                             ? KnownBits::mul(K0, K1)
+                             : KnownBits::mulhu(K0, K1);
+    KnownMul = KnownMul.zext(64);
+    Known = KnownBits::computeForAddSub(true, false, false, KAcc, KnownMul);
     return;
   }
   }
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52.ll b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
index 004db995ee584..ad392da905ab8 100644
--- a/llvm/test/CodeGen/X86/combine-vpmadd52.ll
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefix=AVX
 
 define <2 x i64> @test1_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
 ; AVX512-LABEL: test1_vpmadd52l:
@@ -102,5 +102,151 @@ define <2 x i64> @test_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
   %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %and, <2 x i64> %or)
   ret <2 x i64> %1
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}
+
+
+define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test4_vpmadd52l_vl256:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test4_vpmadd52l_vl256:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1/%x2
+  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %m1, <4 x i64> %m2)
+  ret <4 x i64> %r
+}
+
+define <2 x i64> @test5_vpmadd52l_oporder(<2 x i64> %acc, <2 x i64> %mulA, <2 x i64> %mulB) {
+; AVX512-LABEL: test5_vpmadd52l_oporder:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test5_vpmadd52l_oporder:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of mulA/mulB
+  %a = and <2 x i64> %mulA, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %b = and <2 x i64> %mulB, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @test6_vpmadd52l_under_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test6_vpmadd52l_under_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
+; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm2, %ymm2
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test6_vpmadd52l_under_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685247,2251799813685247,2251799813685247,2251799813685247]
+; AVX-NEXT:    vpand %ymm3, %ymm1, %ymm1
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685248,2251799813685248,2251799813685248,2251799813685248]
+; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm2
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  ; keep only low 51 bits of %x1, force bit 51 of %x2 to 1
+  %andv = and <4 x i64> %x1, splat (i64 2251799813685247) ; (1 << 51) - 1
+  %orv  = or  <4 x i64> %x2, splat (i64 2251799813685248) ; 1 << 51
+  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %andv, <4 x i64> %orv)
+  ret <4 x i64> %r
+}
+
+define <2 x i64> @test7_vpmadd52h_ok(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test7_vpmadd52h_ok:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test7_vpmadd52h_ok:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1/%x2
+  %m1 = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %m2 = and <2 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %m1, <2 x i64> %m2)
+  ret <2 x i64> %r
+}
+
+define <4 x i64> @test8_vpmadd52h_vl256_misplaced_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
+; AVX512-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
+; AVX-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    {vex} vpmadd52huq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1, then place it into %x0 operand position
+  %mask = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %mask, <4 x i64> %x1, <4 x i64> %x2)
+  ret <4 x i64> %r
+}
+
+define <2 x i64> @test9_vpmadd52l_mix_and_or(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test9_vpmadd52l_mix_and_or:
+; AVX:       # %bb.0:
+; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  ; keep only low 52 bits of %x1
+  %a = and <2 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  ; force high 12 bits of %x2 to 1
+  %b = or  <2 x i64> %x2, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
+  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %r
+}
+; Positive test: ensure only the high 12 bits from %acc propagate through VPMADD52L.
+define <4 x i64> @knownbits_propagate_high_from_acc(<4 x i64> %acc, <4 x i64> %x1, <4 x i64> %x2) {
+; AVX512-LABEL: knownbits_propagate_high_from_acc:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: knownbits_propagate_high_from_acc:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  ; mask high 12 bits of accumulator (unsigned value 18442240474082181120 == 0xFFF0000000000000)
+  %acc_hi = and <4 x i64> %acc, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
+  ; keep only low 52 bits of multipliers
+  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495) ; (1 << 52) - 1
+  %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc_hi, <4 x i64> %m1, <4 x i64> %m2)
+  ; keep only high 12 bits
+  %only_high = and <4 x i64> %r, splat (i64 -4503599627370496) ; ~((1 << 52) - 1) = -(1 << 52)
+  ret <4 x i64> %only_high
+}
+
+
+
+declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
+
+

>From 10ecfec1d372568f902a931ea5224c744900301c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=83=E5=9C=8B=E5=BA=AD?= <we3223 at gmail.com>
Date: Sat, 6 Sep 2025 18:31:36 +0800
Subject: [PATCH 3/4] Delete llvm/test/CodeGen/X86/combine-vpmadd52-1.ll

---
 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll | 159 --------------------
 1 file changed, 159 deletions(-)
 delete mode 100644 llvm/test/CodeGen/X86/combine-vpmadd52-1.ll

diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll b/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
deleted file mode 100644
index 8aefb5b8c373f..0000000000000
--- a/llvm/test/CodeGen/X86/combine-vpmadd52-1.ll
+++ /dev/null
@@ -1,159 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma                      | FileCheck %s --check-prefix=AVX
-
-
-define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test4_vpmadd52l_vl256:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test4_vpmadd52l_vl256:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  %m1 = and <4 x i64> %x1, splat (i64 4503599627370495)
-  %m2 = and <4 x i64> %x2, splat (i64 4503599627370495)
-  %r  = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %m1, <4 x i64> %m2)
-  ret <4 x i64> %r
-}
-
-
-
-define <2 x i64> @test5_vpmadd52l_oporder(<2 x i64> %acc, <2 x i64> %mulA, <2 x i64> %mulB) {
-; AVX512-LABEL: test5_vpmadd52l_oporder:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test5_vpmadd52l_oporder:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %a = and <2 x i64> %mulA, splat (i64 4503599627370495)
-  %b = and <2 x i64> %mulB, splat (i64 4503599627370495)
-  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %acc, <2 x i64> %a, <2 x i64> %b)
-  ret <2 x i64> %r
-}
-
-
-
-define <4 x i64> @test6_vpmadd52l_under_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test6_vpmadd52l_under_mask:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm1
-; AVX512-NEXT:    vporq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm2, %ymm2
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test6_vpmadd52l_under_mask:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685247,2251799813685247,2251799813685247,2251799813685247]
-; AVX-NEXT:    vpand %ymm3, %ymm1, %ymm1
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [2251799813685248,2251799813685248,2251799813685248,2251799813685248]
-; AVX-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  %and = and <4 x i64> %x1, splat (i64 2251799813685247)
-  %or  = or  <4 x i64> %x2, splat (i64 2251799813685248)
-  %r   = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %x0, <4 x i64> %and, <4 x i64> %or)
-  ret <4 x i64> %r
-}
-
-
-
-define <2 x i64> @test7_vpmadd52h_ok(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test7_vpmadd52h_ok:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test7_vpmadd52h_ok:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52huq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %m1 = and <2 x i64> %x1, splat (i64 4503599627370495)
-  %m2 = and <2 x i64> %x2, splat (i64 4503599627370495)
-  %r  = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %m1, <2 x i64> %m2)
-  ret <2 x i64> %r
-}
-
-define <4 x i64> @test8_vpmadd52h_vl256_misplaced_mask(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
-; AVX512-NEXT:    vpmadd52huq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test8_vpmadd52h_vl256_misplaced_mask:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495]
-; AVX-NEXT:    vpand %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    {vex} vpmadd52huq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    retq
-  %mask = and <4 x i64> %x1, splat (i64 4503599627370495)
-  %r    = call <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64> %mask, <4 x i64> %x1, <4 x i64> %x2)
-  ret <4 x i64> %r
-}
-
-
-
-define <2 x i64> @test9_vpmadd52l_mix_and_or(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test9_vpmadd52l_mix_and_or:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test9_vpmadd52l_mix_and_or:
-; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpmadd52luq %xmm2, %xmm1, %xmm0
-; AVX-NEXT:    retq
-  %a = and <2 x i64> %x1, splat (i64 4503599627370495)
-  %b = or  <2 x i64> %x2, splat (i64 0)
-  %r = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> %x0, <2 x i64> %a, <2 x i64> %b)
-  ret <2 x i64> %r
-}
-
-define <4 x i64> @knownbits_propagate_high_from_acc(<4 x i64> %acc, <4 x i64> %x1, <4 x i64> %x2) {
-; AVX512-LABEL: knownbits_propagate_high_from_acc:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
-; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX512-NEXT:    vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX512-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: knownbits_propagate_high_from_acc:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [18442240474082181120,18442240474082181120,18442240474082181120,18442240474082181120]
-; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX-NEXT:    {vex} vpmadd52luq %ymm2, %ymm1, %ymm0
-; AVX-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; AVX-NEXT:    retq
-  %acc_hi = and <4 x i64> %acc,
-            <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
-
-
-  %m1 = and <4 x i64> %x1,
-        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
-  %m2 = and <4 x i64> %x2,
-        <i64 4503599627370495, i64 4503599627370495, i64 4503599627370495, i64 4503599627370495>
-
-  %r = call <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64> %acc_hi, <4 x i64> %m1, <4 x i64> %m2)
-
-
-  %only_high = and <4 x i64> %r,
-               <i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496, i64 -4503599627370496>
-  ret <4 x i64> %only_high
-}
-
-
-
-
-; ---- intrinsics decls ----
-declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
-declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
-declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-

>From 8e41815473c691c26f269e84011e69b1ff16220e Mon Sep 17 00:00:00 2001
From: william <we3223 at gmail.com>
Date: Mon, 8 Sep 2025 23:16:18 +0800
Subject: [PATCH 4/4] update code-1 and test case -1

---
 llvm/lib/Target/X86/X86ISelLowering.cpp   |  2 +-
 llvm/test/CodeGen/X86/combine-vpmadd52.ll | 10 ++++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1b0b8595b21f9..51f7862dd251d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39017,7 +39017,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                              ? KnownBits::mul(K0, K1)
                              : KnownBits::mulhu(K0, K1);
     KnownMul = KnownMul.zext(64);
-    Known = KnownBits::computeForAddSub(true, false, false, KAcc, KnownMul);
+    Known = KnownBits::add(KAcc, KnownMul);
     return;
   }
   }
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52.ll b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
index ad392da905ab8..132a0643c3426 100644
--- a/llvm/test/CodeGen/X86/combine-vpmadd52.ll
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefix=AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxifma | FileCheck %s --check-prefixes=CHECK,AVX
+
 
 define <2 x i64> @test1_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
 ; AVX512-LABEL: test1_vpmadd52l:
@@ -104,6 +105,7 @@ define <2 x i64> @test_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
 }
 
 
+
 define <4 x i64> @test4_vpmadd52l_vl256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2) {
 ; AVX512-LABEL: test4_vpmadd52l_vl256:
 ; AVX512:       # %bb.0:
@@ -248,5 +250,5 @@ declare <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64>, <2 x i64>, <2 x i
 declare <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64>, <2 x i64>, <2 x i64>)
 declare <4 x i64> @llvm.x86.avx512.vpmadd52l.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
 declare <4 x i64> @llvm.x86.avx512.vpmadd52h.uq.256(<4 x i64>, <4 x i64>, <4 x i64>)
-
-
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}



More information about the llvm-commits mailing list