[llvm] [X86] Compute the known bits for VPMADD52L/VPMADD52H in SimplifyDemandedBitsForTargetNode (PR #156847)

Hongyu Chen via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 4 03:27:14 PDT 2025


https://github.com/XChy created https://github.com/llvm/llvm-project/pull/156847

Address TODO and compute the known bits with the intermediate result.

From 35e818d41a9f82882e1d33ebaefa8c2368f7ea4a Mon Sep 17 00:00:00 2001
From: XChy <xxs_chy at outlook.com>
Date: Thu, 4 Sep 2025 18:19:18 +0800
Subject: [PATCH 1/2] Precommit testcases

---
 llvm/test/CodeGen/X86/combine-vpmadd52.ll | 168 ++++++++++++++++++++++
 1 file changed, 168 insertions(+)

diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52.ll b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
index 9afc1119267ec..293d95f6e07f1 100644
--- a/llvm/test/CodeGen/X86/combine-vpmadd52.ll
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
@@ -290,3 +290,171 @@ define <2 x i64> @test_vpmadd52h_mul_hi52_negative(<2 x i64> %x0, <2 x i64> %x1,
   %1 = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> %x0, <2 x i64> %and1, <2 x i64> %and2)
   ret <2 x i64> %1
 }
+
+define <2 x i64> @test1_knownbits_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test1_knownbits_vpmadd52l:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [4,4]
+; AVX512-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1,1]
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512-NEXT:    vpmadd52luq %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test1_knownbits_vpmadd52l:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [4,4]
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [1,1]
+; AVX-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX-NEXT:    {vex} vpmadd52luq %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX-NEXT:    retq
+  %and1 = and <2 x i64> %x0, splat (i64 4)
+  %and2 = and <2 x i64> %x1, splat (i64 4)
+  %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> splat(i64 1), <2 x i64> %and1, <2 x i64> %and2)
+  %ret = and <2 x i64> %madd, splat (i64 1)
+  ret <2 x i64> %ret
+}
+
+define <2 x i64> @test1_knownbits_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test1_knownbits_vpmadd52h:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
+; AVX512-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [3,3]
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512-NEXT:    vpmadd52huq %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test1_knownbits_vpmadd52h:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [3,3]
+; AVX-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX-NEXT:    {vex} vpmadd52huq %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX-NEXT:    retq
+  %and1 = and <2 x i64> %x0, splat (i64 1073741824) ; 1LL << 30
+  %and2 = and <2 x i64> %x1, splat (i64 1073741824) ; 1LL << 30
+  %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> splat(i64 3), <2 x i64> %and1, <2 x i64> %and2)
+  %ret = and <2 x i64> %madd, splat (i64 3)
+  ret <2 x i64> %ret
+}
+
+define <2 x i64> @test2_knownbits_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test2_knownbits_vpmadd52l:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm1
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1234,1234]
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512-NEXT:    vpmadd52luq %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test2_knownbits_vpmadd52l:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpmovsxwq {{.*#+}} xmm2 = [1234,1234]
+; AVX-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX-NEXT:    {vex} vpmadd52luq %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX-NEXT:    retq
+  %and1 = and <2 x i64> %x0, splat (i64 67108864) ; 1LL << 26
+  %and2 = and <2 x i64> %x1, splat (i64 33554432) ; 1LL << 25
+  %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> splat(i64 1234), <2 x i64> %and1, <2 x i64> %and2)
+  %ret = and <2 x i64> %madd, splat (i64 1234)
+  ret <2 x i64> %ret
+}
+
+define <2 x i64> @test2_knownbits_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test2_knownbits_vpmadd52h:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
+; AVX512-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1025,1025]
+; AVX512-NEXT:    vpmadd52huq %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm2, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test2_knownbits_vpmadd52h:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpmovsxwq {{.*#+}} xmm2 = [1025,1025]
+; AVX-NEXT:    {vex} vpmadd52huq %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0
+; AVX-NEXT:    retq
+  %and1 = and <2 x i64> %x0, splat (i64 1073741824) ; 1LL << 30
+  %and2 = and <2 x i64> %x1, splat (i64 1073741824) ; 1LL << 30
+  ; add (1LL << 10) + 1
+  %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> splat(i64 1025), <2 x i64> %and1, <2 x i64> %and2)
+  %ret = and <2 x i64> %madd, splat (i64 1)
+  ret <2 x i64> %ret
+}
+
+define <2 x i64> @test3_knownbits_vpmadd52l_negative(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test3_knownbits_vpmadd52l_negative:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1,1]
+; AVX512-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512-NEXT:    vpmadd52luq %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test3_knownbits_vpmadd52l_negative:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [1,1]
+; AVX-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX-NEXT:    {vex} vpmadd52luq %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX-NEXT:    retq
+  %and1 = and <2 x i64> %x0, splat (i64 67108865) ; (1LL << 26) + 1
+  %or = or <2 x i64> %x1, splat (i64 1)
+  %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> splat(i64 1), <2 x i64> %and1, <2 x i64> %or)
+  %ret = and <2 x i64> %madd, splat (i64 1)
+  ret <2 x i64> %ret
+}
+
+define <2 x i64> @test3_knownbits_vpmadd52h_negative(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
+; AVX512-LABEL: test3_knownbits_vpmadd52h_negative:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm1
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1,1]
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512-NEXT:    vpmadd52huq %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    retq
+;
+; AVX-LABEL: test3_knownbits_vpmadd52h_negative:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [1,1]
+; AVX-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX-NEXT:    {vex} vpmadd52huq %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
+; AVX-NEXT:    retq
+  %and1 = and <2 x i64> %x0, splat (i64 4194304) ; 1LL << 22
+  %and2 = and <2 x i64> %x1, splat (i64 1073741824) ; 1LL << 30
+  ; (1 << 22) * (1 << 30) = 1 << 52, whose high 52 bits equal 1
+  %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> splat(i64 1), <2 x i64> %and1, <2 x i64> %and2)
+  %ret = and <2 x i64> %madd, splat (i64 1)
+  ret <2 x i64> %ret
+}

From f4deffa2fb70968adcce213c0a9f16f11d00b756 Mon Sep 17 00:00:00 2001
From: XChy <xxs_chy at outlook.com>
Date: Thu, 4 Sep 2025 18:22:01 +0800
Subject: [PATCH 2/2] [X86] Compute the known bits for VPMADD52L/VPMADD52H in
 SimplifyDemandedBitsForTargetNode

---
 llvm/lib/Target/X86/X86ISelLowering.cpp   |  10 ++-
 llvm/test/CodeGen/X86/combine-vpmadd52.ll | 100 +++++-----------------
 2 files changed, 27 insertions(+), 83 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 572cfdad3c93b..923af28c8cf34 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -44964,7 +44964,7 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
   }
   case X86ISD::VPMADD52L:
   case X86ISD::VPMADD52H: {
-    KnownBits KnownOp0, KnownOp1;
+    KnownBits KnownOp0, KnownOp1, KnownOp2;
     SDValue Op0 = Op.getOperand(0);
     SDValue Op1 = Op.getOperand(1);
     SDValue Op2 = Op.getOperand(2);
@@ -44979,6 +44979,10 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
                              TLO, Depth + 1))
       return true;
 
+    if (SimplifyDemandedBits(Op2, APInt::getAllOnes(64), OriginalDemandedElts,
+                             KnownOp2, TLO, Depth + 1))
+      return true;
+
     KnownBits KnownMul;
     KnownOp0 = KnownOp0.trunc(52);
     KnownOp1 = KnownOp1.trunc(52);
@@ -44993,8 +44997,8 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ADD, DL, VT, C, Op2));
     }
 
-    // TODO: Compute the known bits for VPMADD52L/VPMADD52H.
-    break;
+    Known = KnownBits::add(KnownMul, KnownOp2);
+    return false;
   }
   }
 
diff --git a/llvm/test/CodeGen/X86/combine-vpmadd52.ll b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
index 293d95f6e07f1..2cb060ea92b14 100644
--- a/llvm/test/CodeGen/X86/combine-vpmadd52.ll
+++ b/llvm/test/CodeGen/X86/combine-vpmadd52.ll
@@ -292,27 +292,11 @@ define <2 x i64> @test_vpmadd52h_mul_hi52_negative(<2 x i64> %x0, <2 x i64> %x1,
 }
 
 define <2 x i64> @test1_knownbits_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test1_knownbits_vpmadd52l:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [4,4]
-; AVX512-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1,1]
-; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
-; AVX512-NEXT:    vpmadd52luq %xmm1, %xmm0, %xmm3
-; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test1_knownbits_vpmadd52l:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [4,4]
-; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [1,1]
-; AVX-NEXT:    vmovdqa %xmm2, %xmm3
-; AVX-NEXT:    {vex} vpmadd52luq %xmm1, %xmm0, %xmm3
-; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
-; AVX-NEXT:    retq
+; CHECK-LABEL: test1_knownbits_vpmadd52l:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [1,1]
+; CHECK-NEXT:    # xmm0 = mem[0,0]
+; CHECK-NEXT:    retq
   %and1 = and <2 x i64> %x0, splat (i64 4)
   %and2 = and <2 x i64> %x1, splat (i64 4)
   %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> splat(i64 1), <2 x i64> %and1, <2 x i64> %and2)
@@ -321,27 +305,11 @@ define <2 x i64> @test1_knownbits_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i
 }
 
 define <2 x i64> @test1_knownbits_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test1_knownbits_vpmadd52h:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
-; AVX512-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [3,3]
-; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
-; AVX512-NEXT:    vpmadd52huq %xmm1, %xmm0, %xmm3
-; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test1_knownbits_vpmadd52h:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
-; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpmovsxbq {{.*#+}} xmm2 = [3,3]
-; AVX-NEXT:    vmovdqa %xmm2, %xmm3
-; AVX-NEXT:    {vex} vpmadd52huq %xmm1, %xmm0, %xmm3
-; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
-; AVX-NEXT:    retq
+; CHECK-LABEL: test1_knownbits_vpmadd52h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [3,3]
+; CHECK-NEXT:    # xmm0 = mem[0,0]
+; CHECK-NEXT:    retq
   %and1 = and <2 x i64> %x0, splat (i64 1073741824) ; 1LL << 30
   %and2 = and <2 x i64> %x1, splat (i64 1073741824) ; 1LL << 30
   %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52h.uq.128(<2 x i64> splat(i64 3), <2 x i64> %and1, <2 x i64> %and2)
@@ -350,25 +318,11 @@ define <2 x i64> @test1_knownbits_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i
 }
 
 define <2 x i64> @test2_knownbits_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test2_knownbits_vpmadd52l:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm1
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1234,1234]
-; AVX512-NEXT:    vmovdqa %xmm2, %xmm3
-; AVX512-NEXT:    vpmadd52luq %xmm1, %xmm0, %xmm3
-; AVX512-NEXT:    vpand %xmm2, %xmm3, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test2_knownbits_vpmadd52l:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX-NEXT:    vpmovsxwq {{.*#+}} xmm2 = [1234,1234]
-; AVX-NEXT:    vmovdqa %xmm2, %xmm3
-; AVX-NEXT:    {vex} vpmadd52luq %xmm1, %xmm0, %xmm3
-; AVX-NEXT:    vpand %xmm2, %xmm3, %xmm0
-; AVX-NEXT:    retq
+; CHECK-LABEL: test2_knownbits_vpmadd52l:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [1234,1234]
+; CHECK-NEXT:    # xmm0 = mem[0,0]
+; CHECK-NEXT:    retq
   %and1 = and <2 x i64> %x0, splat (i64 67108864) ; 1LL << 26
   %and2 = and <2 x i64> %x1, splat (i64 33554432) ; 1LL << 25
   %madd = call <2 x i64> @llvm.x86.avx512.vpmadd52l.uq.128(<2 x i64> splat(i64 1234), <2 x i64> %and1, <2 x i64> %and2)
@@ -377,25 +331,11 @@ define <2 x i64> @test2_knownbits_vpmadd52l(<2 x i64> %x0, <2 x i64> %x1, <2 x i
 }
 
 define <2 x i64> @test2_knownbits_vpmadd52h(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2) {
-; AVX512-LABEL: test2_knownbits_vpmadd52h:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
-; AVX512-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1025,1025]
-; AVX512-NEXT:    vpmadd52huq %xmm1, %xmm0, %xmm2
-; AVX512-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm2, %xmm0
-; AVX512-NEXT:    retq
-;
-; AVX-LABEL: test2_knownbits_vpmadd52h:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [1073741824,1073741824]
-; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vpmovsxwq {{.*#+}} xmm2 = [1025,1025]
-; AVX-NEXT:    {vex} vpmadd52huq %xmm1, %xmm0, %xmm2
-; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0
-; AVX-NEXT:    retq
+; CHECK-LABEL: test2_knownbits_vpmadd52h:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm0 = [1,1]
+; CHECK-NEXT:    # xmm0 = mem[0,0]
+; CHECK-NEXT:    retq
   %and1 = and <2 x i64> %x0, splat (i64 1073741824) ; 1LL << 30
   %and2 = and <2 x i64> %x1, splat (i64 1073741824) ; 1LL << 30
   ; add (1LL << 10) + 1



More information about the llvm-commits mailing list