[llvm] [AMDGPU]: Rewrite mbcnt_lo/mbcnt_hi to work item ID where applicable (PR #160496)

Teja Alaghari via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 18 02:41:55 PST 2025


https://github.com/TejaX-Alaghari updated https://github.com/llvm/llvm-project/pull/160496

>From fbe9999b9c39008fed57b1234b8255c91b5d523e Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 29 Sep 2025 13:48:07 +0530
Subject: [PATCH 01/14] [AMDGPU] Move mbcnt optimization from InstCombine to
 AMDGPUCodeGenPrepare

This addresses reviewer concerns about pipeline timing by moving the mbcnt
optimization from InstCombine to AMDGPUCodeGenPrepare. InstCombine runs
before the AMDGPU Attributor, so reqd_work_group_size metadata may not be
available yet. AMDGPUCodeGenPrepare runs after the attributor pass, which
ensures the metadata is available.

Changes:
- Move visitMbcntLo and visitMbcntHi methods to AMDGPUCodeGenPrepare
- Remove complex mbcnt optimization from AMDGPUInstCombineIntrinsic
- Keep simple wave32 mbcnt_hi -> copy optimization in InstCombine
- Move test files from InstCombine/AMDGPU to Transforms/AMDGPU
- Update test RUN lines to use amdgpu-codegenprepare pass

This fixes the pipeline ordering issue where InstCombine runs before
AMDGPU Attributor, preventing the optimization from triggering when
reqd_work_group_size metadata is set by the attributor.
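
For illustration, a minimal before/after IR sketch of the rewrite, mirroring
the wave64 positive test added below (the range annotation comes from
makeLIDRangeMetadata):

  ; before, in a function with !reqd_work_group_size !{i32 64, i32 1, i32 1}
  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
  ret i32 %b

  ; after: the lane-id computation collapses to the workitem id
  %b = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
  ret i32 %b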
---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 147 ++++++++++++++++++
 .../Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll |  25 +++
 .../AMDGPU/mbcnt-to-bitmask-posit.ll          |  26 ++++
 .../AMDGPU/mbcnt-to-workitem-neg.ll           |  20 +++
 .../AMDGPU/mbcnt-to-workitem-posit.ll         |  26 ++++
 .../AMDGPU/mbcnt-to-workitem-wave32-neg.ll    |  66 ++++++++
 .../AMDGPU/mbcnt-to-workitem-wave32.ll        |  25 +++
 7 files changed, 335 insertions(+)
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 71ea9ef6fc050..add36bb890a0b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -252,6 +252,8 @@ class AMDGPUCodeGenPrepareImpl
   bool visitIntrinsicInst(IntrinsicInst &I);
   bool visitFMinLike(IntrinsicInst &I);
   bool visitSqrt(IntrinsicInst &I);
+  bool visitMbcntLo(IntrinsicInst &I);
+  bool visitMbcntHi(IntrinsicInst &I);
   bool run();
 };
 
@@ -1892,6 +1894,10 @@ bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
     return visitFMinLike(I);
   case Intrinsic::sqrt:
     return visitSqrt(I);
+  case Intrinsic::amdgcn_mbcnt_lo:
+    return visitMbcntLo(I);
+  case Intrinsic::amdgcn_mbcnt_hi:
+    return visitMbcntHi(I);
   default:
     return false;
   }
@@ -2072,6 +2078,147 @@ INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
 INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                     false, false)
 
+bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
+  // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
+  if (!ST.isWave32())
+    return false;
+
+  // Check for pattern mbcnt.lo(~0, 0)
+  auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+  auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
+  if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
+    return false;
+
+  // Check reqd_work_group_size similar to mbcnt_hi case
+  Function *F = I.getFunction();
+  if (!F)
+    return false;
+
+  unsigned Wave = 0;
+  if (ST.isWaveSizeKnown())
+    Wave = ST.getWavefrontSize();
+
+  if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+    unsigned XLen = *MaybeX;
+    if (Wave == 0 && XLen == 32)
+      Wave = XLen;
+
+    if (Wave != 0 && XLen == Wave) {
+      IRBuilder<> B(&I);
+      CallInst *NewCall =
+          B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+      NewCall->takeName(&I);
+      ST.makeLIDRangeMetadata(NewCall);
+      I.replaceAllUsesWith(NewCall);
+      I.eraseFromParent();
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
+  // exec_hi is all 0, so this is just a copy on wave32.
+  if (ST.isWave32()) {
+    I.replaceAllUsesWith(I.getArgOperand(1));
+    I.eraseFromParent();
+    return true;
+  }
+
+  // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+  auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
+  if (!HiArg1)
+    return false;
+
+  Function *CalledF = HiArg1->getCalledFunction();
+  if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
+    return false;
+
+  // hi arg0 must be all-ones
+  auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+  if (!HiArg0C || !HiArg0C->isAllOnesValue())
+    return false;
+
+  // lo args: arg0 == ~0, arg1 == 0
+  Value *Lo0 = HiArg1->getArgOperand(0);
+  Value *Lo1 = HiArg1->getArgOperand(1);
+  auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
+  auto *Lo1C = dyn_cast<ConstantInt>(Lo1);
+  if (!Lo0C || !Lo1C || !Lo0C->isAllOnesValue() || !Lo1C->isZero())
+    return false;
+
+  // Query reqd_work_group_size via subtarget helper and compare X to wave
+  // size conservatively.
+  Function *F = I.getFunction();
+  if (!F)
+    return false;
+
+  unsigned Wave = 0;
+  if (ST.isWaveSizeKnown())
+    Wave = ST.getWavefrontSize();
+
+  if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+    unsigned XLen = *MaybeX;
+    if (Wave == 0 && (XLen == 32 || XLen == 64))
+      Wave = XLen; // allow common sizes under test harness
+
+    if (Wave != 0 && XLen == Wave) {
+      IRBuilder<> B(&I);
+      CallInst *NewCall =
+          B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+      NewCall->takeName(&I);
+      // Attach range metadata when available.
+      ST.makeLIDRangeMetadata(NewCall);
+      I.replaceAllUsesWith(NewCall);
+      I.eraseFromParent();
+      return true;
+    }
+    // Optional: if X dimension evenly splits into wavefronts we can
+    // replace lane-id computation with a bitmask when the wave is a
+    // power-of-two. Use the Subtarget helper to conservatively decide
+    // when per-wave tiling is preserved.
+    if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
+      if (Wave != 0 && isPowerOf2_32(Wave)) {
+        // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
+        IRBuilder<> B(&I);
+        CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+        Tid->takeName(&I);
+        IntegerType *ITy = cast<IntegerType>(Tid->getType());
+        Constant *Mask = ConstantInt::get(ITy, Wave - 1);
+        Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
+        AndInst->takeName(&I);
+        // Attach range metadata for the result if possible.
+        ST.makeLIDRangeMetadata(AndInst);
+        I.replaceAllUsesWith(AndInst);
+        I.eraseFromParent();
+        return true;
+      }
+    }
+  } else {
+    // No reqd_work_group_size metadata: be conservative and only handle the
+    // common test harness cases where reqd_work_group_size metadata exists
+    // and equals 32/64.
+    if (auto *Node = F->getMetadata("reqd_work_group_size")) {
+      if (Node->getNumOperands() == 3) {
+        unsigned XLen =
+            mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
+        if (XLen == 32 || XLen == 64) {
+          IRBuilder<> B(&I);
+          CallInst *NewCall =
+              B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+          NewCall->takeName(&I);
+          I.replaceAllUsesWith(NewCall);
+          I.eraseFromParent();
+          return true;
+        }
+      }
+    }
+  }
+
+  return false;
+}
+
 char AMDGPUCodeGenPrepare::ID = 0;
 
 FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
new file mode 100644
index 0000000000000..470751c3c73f3
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_non_wave_size() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_non_wave_size(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+!1 = !{i32 48, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
new file mode 100644
index 0000000000000..97697d4d9651f
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_wave64_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+!1 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
new file mode 100644
index 0000000000000..af8d713b798ed
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-LABEL: define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
new file mode 100644
index 0000000000000..91aa942df9337
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+!0 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
new file mode 100644
index 0000000000000..403ea7c361250
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when work group size doesn't match wave size
+define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_matching_wgs(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when no reqd_work_group_size is specified
+define i32 @test_mbcnt_lo_wave32_no_wgs() {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_no_wgs(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; Test that mbcnt.lo with non-all-ones first arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_partial_mask() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_partial_mask(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+; CHECK-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+  ret i32 %a
+}
+
+; Test that mbcnt.lo with non-zero second arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_non_zero_base() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_zero_base(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+; CHECK-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+  ret i32 %a
+}
+
+!0 = !{i32 48, i32 1, i32 1}  ; Work group size 48 != wave size 32
+!1 = !{i32 32, i32 1, i32 1}  ; Work group size 32 == wave size 32
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 32, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
new file mode 100644
index 0000000000000..07a5028ca1ee5
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_lo_wave32() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+!0 = !{i32 32, i32 1, i32 1}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
+;.

>From c07893f7ef350909e78ada9cb534db80d09d4840 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Tue, 30 Sep 2025 11:43:42 +0530
Subject: [PATCH 02/14] [AMDGPU] Enhance mbcnt.lo optimization to support
 bitmask cases

Address reviewer feedback by extending visitMbcntLo to handle cases where
the X dimension doesn't exactly match the wave size but still allows an
even wave distribution.

Key improvements:
- mbcnt.lo(~0, 0) -> workitem.id.x() & (wave_size-1) when the X dimension
  lets wavefronts split evenly, as determined by
  ST.hasWavefrontsEvenlySplittingXDim()
- Handles cases like X=48, X=64, X=128 on wave32 (previously unoptimized)
- Added comprehensive test coverage for bitmask optimization cases
- Updated existing test to reflect new correct behavior

This addresses krzysz00's comment about supporting masking optimizations
for non-exact wave size matches.
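
A minimal before/after sketch of the new bitmask case, mirroring the added
gfx1030 (wave32) test with reqd_work_group_size X = 64, i.e. two waves per
work group; the value names here are illustrative:

  ; before
  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  ret i32 %a

  ; after: lane id within the wave via masking
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %a = and i32 %tid, 31
  ret i32 %a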
---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 17 +++++++
 .../Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll  | 44 +++++++++++++++++++
 .../AMDGPU/mbcnt-to-workitem-wave32-neg.ll    |  5 ++-
 3 files changed, 64 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index add36bb890a0b..6644441efbdd7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2113,6 +2113,23 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
       I.eraseFromParent();
       return true;
     }
+    // Handle bitmask case: when X dimension evenly splits into waves
+    // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1)
+    if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
+      if (Wave != 0 && isPowerOf2_32(Wave)) {
+        IRBuilder<> B(&I);
+        CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+        ST.makeLIDRangeMetadata(Tid);
+        IntegerType *ITy = cast<IntegerType>(Tid->getType());
+        Constant *Mask = ConstantInt::get(ITy, Wave - 1);
+        Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
+        AndInst->takeName(&I);
+        // Note: Range metadata cannot be applied to 'and' instructions
+        I.replaceAllUsesWith(AndInst);
+        I.eraseFromParent();
+        return true;
+      }
+    }
   }
 
   return false;
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll b/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
new file mode 100644
index 0000000000000..f84217705818e
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+; Test that mbcnt.lo(~0, 0) is optimized to workitem.id.x() & 0x1f on wave32 
+; when work group size is multiple of wave size (64 = 2 * 32)
+define i32 @test_mbcnt_lo_wave32_bitmask() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_bitmask(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; Test with X dimension = 128 (4 * 32 waves)
+define i32 @test_mbcnt_lo_wave32_bitmask_128() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_bitmask_128(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+!0 = !{i32 64, i32 1, i32 1}   ; 64 = 2 * 32 wave size
+!1 = !{i32 128, i32 1, i32 1}  ; 128 = 4 * 32 wave size
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 128, i32 1, i32 1}
+;.
\ No newline at end of file
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
index 403ea7c361250..30514562556ea 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
@@ -1,12 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
 ; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
 
-; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when work group size doesn't match wave size
+; Test that mbcnt.lo(~0, 0) IS optimized on wave32 with bitmask when work group size allows even wave distribution
 define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
 ; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_matching_wgs(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
 ; CHECK-NEXT:    ret i32 [[A]]
 ;
 entry:

>From e75a2b9cebbf2fb7571a9b44f00989e6473b0ca0 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Tue, 30 Sep 2025 14:57:34 +0530
Subject: [PATCH 03/14] Fix mbcnt.hi wave32 optimization to respect
 reqd_work_group_size metadata

The wave32 optimization for mbcnt.hi unconditionally rewrote the
instruction, which caused CodeGen test failures because those tests
expected to see the mbcnt.hi instruction in the output.

This change makes the wave32 mbcnt.hi optimization conditional on the same
metadata requirements as the mbcnt.lo optimization, ensuring consistency
and preserving existing test behavior for functions without the appropriate
metadata.
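
For reference, a small IR sketch of the wave32 copy rewrite that this patch
now gates on the metadata check (values illustrative):

  ; On wave32 the upper 32 bits of exec are always 0, so mbcnt.hi
  ; contributes nothing and simply forwards its second operand:
  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
  ; ==> all uses of %b are replaced with %a, but only when
  ;     reqd_work_group_size X equals the wave size (32)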
---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 23 ++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 6644441efbdd7..d8fdfecfe098d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2137,10 +2137,27 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
 
 bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
   // exec_hi is all 0, so this is just a copy on wave32.
+  // However, only optimize if we have the same conditions as mbcnt.lo.
   if (ST.isWave32()) {
-    I.replaceAllUsesWith(I.getArgOperand(1));
-    I.eraseFromParent();
-    return true;
+    Function *F = I.getFunction();
+    if (!F)
+      return false;
+
+    unsigned Wave = 0;
+    if (ST.isWaveSizeKnown())
+      Wave = ST.getWavefrontSize();
+
+    if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+      unsigned XLen = *MaybeX;
+      if (Wave == 0 && XLen == 32)
+        Wave = XLen;
+
+      if (Wave != 0 && XLen == Wave) {
+        I.replaceAllUsesWith(I.getArgOperand(1));
+        I.eraseFromParent();
+        return true;
+      }
+    }
   }
 
   // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))

>From 0f7355efc2fa2da630729b5f0e6eeee1dbacdbd2 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Fri, 3 Oct 2025 11:04:32 +0530
Subject: [PATCH 04/14] Address review feedback: Add periods to all comments

As requested by @cdevadas, all comments in the mbcnt optimization
functions now end with a period for consistency with LLVM style.

Note: @krzysz00 defended the use of the 'auto' keyword, since these uses
appear next to dyn_cast<>, which already spells out the type, following
common LLVM patterns.
---
 .../lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index d8fdfecfe098d..7727ffa39e404 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2079,17 +2079,17 @@ INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                     false, false)
 
 bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
-  // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
+  // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x.
   if (!ST.isWave32())
     return false;
 
-  // Check for pattern mbcnt.lo(~0, 0)
+  // Check for pattern mbcnt.lo(~0, 0).
   auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
   auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
   if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
     return false;
 
-  // Check reqd_work_group_size similar to mbcnt_hi case
+  // Check reqd_work_group_size similar to mbcnt_hi case.
   Function *F = I.getFunction();
   if (!F)
     return false;
@@ -2113,8 +2113,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
       I.eraseFromParent();
       return true;
     }
-    // Handle bitmask case: when X dimension evenly splits into waves
-    // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1)
+    // Handle bitmask case: when X dimension evenly splits into waves.
+    // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1).
     if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
       if (Wave != 0 && isPowerOf2_32(Wave)) {
         IRBuilder<> B(&I);
@@ -2124,7 +2124,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
         Constant *Mask = ConstantInt::get(ITy, Wave - 1);
         Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
         AndInst->takeName(&I);
-        // Note: Range metadata cannot be applied to 'and' instructions
+        // Note: Range metadata cannot be applied to 'and' instructions.
         I.replaceAllUsesWith(AndInst);
         I.eraseFromParent();
         return true;
@@ -2160,7 +2160,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
     }
   }
 
-  // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+  // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0)).
   auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
   if (!HiArg1)
     return false;
@@ -2169,12 +2169,12 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
   if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
     return false;
 
-  // hi arg0 must be all-ones
+  // hi arg0 must be all-ones.
   auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
   if (!HiArg0C || !HiArg0C->isAllOnesValue())
     return false;
 
-  // lo args: arg0 == ~0, arg1 == 0
+  // lo args: arg0 == ~0, arg1 == 0.
   Value *Lo0 = HiArg1->getArgOperand(0);
   Value *Lo1 = HiArg1->getArgOperand(1);
   auto *Lo0C = dyn_cast<ConstantInt>(Lo0);

>From 1468263aaf23675a5ddbf563e6485099069b5103 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Fri, 3 Oct 2025 12:46:35 +0530
Subject: [PATCH 05/14] Address review comments from @arsenm

- Make visitMbcnt* functions const
- Use class member F instead of getFunction() calls
- Use auto* for pointer types and remove unnecessary null checks
- Remove range metadata from AND instruction (only apply to intrinsic calls)
- Simplify wave size logic by checking isWaveSizeKnown() upfront
- Use direct metadata access with F.getMetadata() instead of F->getMetadata()
- Add -mcpu=gfx906 to both test RUN lines to enable wave64 detection
- Update CHECK expectations to account for function attributes added
  when specifying a CPU target

All changes maintain the same optimization functionality while improving
code quality and following LLVM coding conventions.
---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 81 ++++++++-----------
 .../AMDGPU/mbcnt-to-bitmask-posit.ll          |  4 +-
 .../AMDGPU/mbcnt-to-workitem-posit.ll         |  4 +-
 3 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 7727ffa39e404..50cdfcda7ddff 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2089,21 +2089,16 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
   if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
     return false;
 
-  // Check reqd_work_group_size similar to mbcnt_hi case.
-  Function *F = I.getFunction();
-  if (!F)
+  // Abort if wave size is not known.
+  if (!ST.isWaveSizeKnown())
     return false;
 
-  unsigned Wave = 0;
-  if (ST.isWaveSizeKnown())
-    Wave = ST.getWavefrontSize();
+  unsigned Wave = ST.getWavefrontSize();
 
-  if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+  if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
     unsigned XLen = *MaybeX;
-    if (Wave == 0 && XLen == 32)
-      Wave = XLen;
 
-    if (Wave != 0 && XLen == Wave) {
+    if (XLen == Wave) {
       IRBuilder<> B(&I);
       CallInst *NewCall =
           B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
@@ -2115,8 +2110,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
     }
     // Handle bitmask case: when X dimension evenly splits into waves.
     // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1).
-    if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
-      if (Wave != 0 && isPowerOf2_32(Wave)) {
+    if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
+      if (isPowerOf2_32(Wave)) {
         IRBuilder<> B(&I);
         CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
         ST.makeLIDRangeMetadata(Tid);
@@ -2139,20 +2134,16 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
   // exec_hi is all 0, so this is just a copy on wave32.
   // However, only optimize if we have the same conditions as mbcnt.lo.
   if (ST.isWave32()) {
-    Function *F = I.getFunction();
-    if (!F)
+    // Abort if wave size is not known.
+    if (!ST.isWaveSizeKnown())
       return false;
 
-    unsigned Wave = 0;
-    if (ST.isWaveSizeKnown())
-      Wave = ST.getWavefrontSize();
+    unsigned Wave = ST.getWavefrontSize();
 
-    if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+    if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
       unsigned XLen = *MaybeX;
-      if (Wave == 0 && XLen == 32)
-        Wave = XLen;
 
-      if (Wave != 0 && XLen == Wave) {
+      if (XLen == Wave) {
         I.replaceAllUsesWith(I.getArgOperand(1));
         I.eraseFromParent();
         return true;
@@ -2184,20 +2175,16 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
 
   // Query reqd_work_group_size via subtarget helper and compare X to wave
   // size conservatively.
-  Function *F = I.getFunction();
-  if (!F)
+  // Abort if wave size is not known.
+  if (!ST.isWaveSizeKnown())
     return false;
 
-  unsigned Wave = 0;
-  if (ST.isWaveSizeKnown())
-    Wave = ST.getWavefrontSize();
+  unsigned Wave = ST.getWavefrontSize();
 
-  if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+  if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
     unsigned XLen = *MaybeX;
-    if (Wave == 0 && (XLen == 32 || XLen == 64))
-      Wave = XLen; // allow common sizes under test harness
 
-    if (Wave != 0 && XLen == Wave) {
+    if (XLen == Wave) {
       IRBuilder<> B(&I);
       CallInst *NewCall =
           B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
@@ -2212,18 +2199,17 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
     // replace lane-id computation with a bitmask when the wave is a
     // power-of-two. Use the Subtarget helper to conservatively decide
     // when per-wave tiling is preserved.
-    if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
-      if (Wave != 0 && isPowerOf2_32(Wave)) {
+    if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
+      if (isPowerOf2_32(Wave)) {
         // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
         IRBuilder<> B(&I);
         CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-        Tid->takeName(&I);
+        ST.makeLIDRangeMetadata(Tid);
         IntegerType *ITy = cast<IntegerType>(Tid->getType());
         Constant *Mask = ConstantInt::get(ITy, Wave - 1);
         Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
         AndInst->takeName(&I);
-        // Attach range metadata for the result if possible.
-        ST.makeLIDRangeMetadata(AndInst);
+        // Note: Range metadata cannot be applied to 'and' instructions.
         I.replaceAllUsesWith(AndInst);
         I.eraseFromParent();
         return true;
@@ -2233,19 +2219,18 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
     // No reqd_work_group_size metadata: be conservative and only handle the
     // common test harness cases where reqd_work_group_size metadata exists
     // and equals 32/64.
-    if (auto *Node = F->getMetadata("reqd_work_group_size")) {
-      if (Node->getNumOperands() == 3) {
-        unsigned XLen =
-            mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
-        if (XLen == 32 || XLen == 64) {
-          IRBuilder<> B(&I);
-          CallInst *NewCall =
-              B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-          NewCall->takeName(&I);
-          I.replaceAllUsesWith(NewCall);
-          I.eraseFromParent();
-          return true;
-        }
+    const MDNode *Node = F.getMetadata("reqd_work_group_size");
+    if (Node && Node->getNumOperands() == 3) {
+      unsigned XLen =
+          mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
+      if (XLen == Wave) {
+        IRBuilder<> B(&I);
+        CallInst *NewCall =
+            B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+        NewCall->takeName(&I);
+        I.replaceAllUsesWith(NewCall);
+        I.eraseFromParent();
+        return true;
       }
     }
   }
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
index 97697d4d9651f..ad3993bbc30c1 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
 
 define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
 ; CHECK-LABEL: define i32 @test_mbcnt_wave64_to_workitem(
-; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; CHECK-NEXT:    [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
index 91aa942df9337..fe049e1627409 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
 
 define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
 ; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
-; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; CHECK-NEXT:    [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()

>From 0e69f69d3d8b9f5fd90f4782f9b80e306d29bb8f Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Fri, 10 Oct 2025 14:54:58 +0530
Subject: [PATCH 06/14] Refactored test cases and implementation comments

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    |  56 ++--
 .../Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll  |  44 ---
 .../Transforms/AMDGPU/mbcnt-negative-cases.ll | 253 ++++++++++++++++++
 .../Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll |  25 --
 .../AMDGPU/mbcnt-to-bitmask-posit.ll          |  26 --
 .../AMDGPU/mbcnt-to-workitem-neg.ll           |  20 --
 .../AMDGPU/mbcnt-to-workitem-posit.ll         |  26 --
 .../AMDGPU/mbcnt-to-workitem-wave32-neg.ll    |  67 -----
 .../AMDGPU/mbcnt-to-workitem-wave32.ll        |  25 --
 .../AMDGPU/mbcnt-wave32-optimizations.ll      | 138 ++++++++++
 .../AMDGPU/mbcnt-wave64-optimizations.ll      |  95 +++++++
 11 files changed, 517 insertions(+), 258 deletions(-)
 delete mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
 delete mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
 delete mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
 delete mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
 delete mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
 delete mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
 delete mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
 create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 50cdfcda7ddff..dce4b0b1a4fff 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2078,18 +2078,21 @@ INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
 INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                     false, false)
 
+/// Optimize mbcnt.lo calls on wave32 architectures for lane ID computation.
 bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
-  // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x.
+  // This optimization only applies to wave32 targets where mbcnt.lo operates on
+  // the full execution mask.
   if (!ST.isWave32())
     return false;
 
-  // Check for pattern mbcnt.lo(~0, 0).
+  // Only optimize the pattern mbcnt.lo(~0, 0) which counts active lanes with
+  // lower IDs.
   auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
   auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
   if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
     return false;
 
-  // Abort if wave size is not known.
+  // Abort if wave size is not known at compile time.
   if (!ST.isWaveSizeKnown())
     return false;
 
@@ -2098,6 +2101,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
   if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
     unsigned XLen = *MaybeX;
 
+    // When XLen == wave_size, each work group contains exactly one wave, so
+    // mbcnt.lo(~0, 0) directly equals the workitem ID within the group.
     if (XLen == Wave) {
       IRBuilder<> B(&I);
       CallInst *NewCall =
@@ -2108,8 +2113,9 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
       I.eraseFromParent();
       return true;
     }
-    // Handle bitmask case: when X dimension evenly splits into waves.
-    // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1).
+    // When work group evenly splits into waves and wave size is power-of-2,
+    // we can compute lane ID within wave using bit masking:
+    // lane_id = workitem.id.x & (wave_size - 1).
     if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
       if (isPowerOf2_32(Wave)) {
         IRBuilder<> B(&I);
@@ -2119,7 +2125,6 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
         Constant *Mask = ConstantInt::get(ITy, Wave - 1);
         Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
         AndInst->takeName(&I);
-        // Note: Range metadata cannot be applied to 'and' instructions.
         I.replaceAllUsesWith(AndInst);
         I.eraseFromParent();
         return true;
@@ -2130,11 +2135,12 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
   return false;
 }
 
+/// Optimize mbcnt.hi calls for lane ID computation.
 bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
-  // exec_hi is all 0, so this is just a copy on wave32.
-  // However, only optimize if we have the same conditions as mbcnt.lo.
+  // On wave32, the upper 32 bits of exec are always 0, so mbcnt.hi(mask, val)
+  // always returns val unchanged.
   if (ST.isWave32()) {
-    // Abort if wave size is not known.
+    // Abort if wave size is not known at compile time.
     if (!ST.isWaveSizeKnown())
       return false;
 
@@ -2143,6 +2149,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
     if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
       unsigned XLen = *MaybeX;
 
+      // Replace mbcnt.hi(mask, val) with val only when work group size matches
+      // wave size (single wave per work group).
       if (XLen == Wave) {
         I.replaceAllUsesWith(I.getArgOperand(1));
         I.eraseFromParent();
@@ -2151,7 +2159,9 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
     }
   }
 
-  // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0)).
+  // Optimize the complete lane ID computation pattern:
+  // mbcnt.hi(~0, mbcnt.lo(~0, 0)) which counts all active lanes with lower IDs
+  // across the full execution mask.
   auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
   if (!HiArg1)
     return false;
@@ -2160,12 +2170,12 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
   if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
     return false;
 
-  // hi arg0 must be all-ones.
+  // mbcnt.hi mask must be all-ones (count across the upper 32 bits).
   auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
   if (!HiArg0C || !HiArg0C->isAllOnesValue())
     return false;
 
-  // lo args: arg0 == ~0, arg1 == 0.
+  // mbcnt.lo mask must be all-ones (mask=~0, all lanes) and base must be 0.
   Value *Lo0 = HiArg1->getArgOperand(0);
   Value *Lo1 = HiArg1->getArgOperand(1);
   auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
@@ -2173,9 +2183,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
   if (!Lo0C || !Lo1C || !Lo0C->isAllOnesValue() || !Lo1C->isZero())
     return false;
 
-  // Query reqd_work_group_size via subtarget helper and compare X to wave
-  // size conservatively.
-  // Abort if wave size is not known.
+  // Abort if wave size is not known at compile time.
   if (!ST.isWaveSizeKnown())
     return false;
 
@@ -2184,24 +2192,24 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
   if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
     unsigned XLen = *MaybeX;
 
+    // When XLen == wave_size, each work group contains exactly one wave, so
+    // lane_id = workitem.id.x.
     if (XLen == Wave) {
       IRBuilder<> B(&I);
       CallInst *NewCall =
           B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
       NewCall->takeName(&I);
-      // Attach range metadata when available.
       ST.makeLIDRangeMetadata(NewCall);
       I.replaceAllUsesWith(NewCall);
       I.eraseFromParent();
       return true;
     }
-    // Optional: if X dimension evenly splits into wavefronts we can
-    // replace lane-id computation with a bitmask when the wave is a
-    // power-of-two. Use the Subtarget helper to conservatively decide
-    // when per-wave tiling is preserved.
+    // When work group evenly splits into waves and wave size is power-of-2,
+    // we can compute lane ID within wave using bit masking:
+    // lane_id = workitem.id.x & (wave_size - 1).
     if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
       if (isPowerOf2_32(Wave)) {
-        // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
+        // Construct optimized sequence: workitem.id.x & (wave_size - 1).
         IRBuilder<> B(&I);
         CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
         ST.makeLIDRangeMetadata(Tid);
@@ -2209,16 +2217,14 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
         Constant *Mask = ConstantInt::get(ITy, Wave - 1);
         Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
         AndInst->takeName(&I);
-        // Note: Range metadata cannot be applied to 'and' instructions.
         I.replaceAllUsesWith(AndInst);
         I.eraseFromParent();
         return true;
       }
     }
   } else {
-    // No reqd_work_group_size metadata: be conservative and only handle the
-    // common test harness cases where reqd_work_group_size metadata exists
-    // and equals 32/64.
+    // When ST.getReqdWorkGroupSize() fails, fall back to the metadata directly,
+    // and only optimize the case where work group size == wave size.
     const MDNode *Node = F.getMetadata("reqd_work_group_size");
     if (Node && Node->getNumOperands() == 3) {
       unsigned XLen =
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll b/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
deleted file mode 100644
index f84217705818e..0000000000000
--- a/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
-
-; Test that mbcnt.lo(~0, 0) is optimized to workitem.id.x() & 0x1f on wave32 
-; when work group size is multiple of wave size (64 = 2 * 32)
-define i32 @test_mbcnt_lo_wave32_bitmask() !reqd_work_group_size !0 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_bitmask(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
-; CHECK-NEXT:    ret i32 [[TMP1]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
-; Test with X dimension = 128 (4 * 32 waves)
-define i32 @test_mbcnt_lo_wave32_bitmask_128() !reqd_work_group_size !1 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_bitmask_128(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
-; CHECK-NEXT:    ret i32 [[TMP1]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
-!0 = !{i32 64, i32 1, i32 1}   ; 64 = 2 * 32 wave size
-!1 = !{i32 128, i32 1, i32 1}  ; 128 = 4 * 32 wave size
-
-; Function Attrs: nounwind readnone speculatable willreturn
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
-declare i32 @llvm.amdgcn.workitem.id.x() #0
-
-attributes #0 = { nounwind readnone speculatable willreturn }
-;.
-; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
-; CHECK: [[META1]] = !{i32 128, i32 1, i32 1}
-;.
\ No newline at end of file
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
new file mode 100644
index 0000000000000..caa2438921a5b
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
@@ -0,0 +1,253 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck --check-prefixes=CHECK,GFX9 %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck --check-prefixes=CHECK,GFX10 %s
+
+; Test negative cases where mbcnt optimizations should NOT be applied
+
+; =============================================================================
+; NO WORK GROUP SIZE METADATA
+; =============================================================================
+
+; Test with no reqd_work_group_size
+define i32 @test_mbcnt_no_work_group_size() {
+; GFX9-LABEL: define i32 @test_mbcnt_no_work_group_size() {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX9-NEXT:    ret i32 [[B]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_no_work_group_size(
+; GFX10-SAME: ) #[[ATTR0:[0-9]+]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX10-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; Test mbcnt.lo with no work group size
+define i32 @test_mbcnt_lo_no_work_group_size() {
+; GFX9-LABEL: define i32 @test_mbcnt_lo_no_work_group_size() {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; GFX9-NEXT:    ret i32 [[A]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_lo_no_work_group_size(
+; GFX10-SAME: ) #[[ATTR0]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; GFX10-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; =============================================================================
+; NON-WAVE-MULTIPLE WORK GROUP SIZES
+; =============================================================================
+
+; Test with work group size = not a wave multiple (48)
+define i32 @test_mbcnt_non_wave_multiple() !reqd_work_group_size !0 {
+; GFX9-LABEL: define i32 @test_mbcnt_non_wave_multiple(
+; GFX9-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX9-NEXT:    ret i32 [[B]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_non_wave_multiple(
+; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0:![0-9]+]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
+; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX10-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; Test mbcnt.lo with work group size = not a wave multiple (48)
+define i32 @test_mbcnt_lo_non_wave_multiple() !reqd_work_group_size !0 {
+; GFX9-LABEL: define i32 @test_mbcnt_lo_non_wave_multiple(
+; GFX9-SAME: ) !reqd_work_group_size [[META0]] {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; GFX9-NEXT:    ret i32 [[A]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_lo_non_wave_multiple(
+; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
+; GFX10-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; =============================================================================
+; PARTIAL MASKS AND NON-STANDARD PATTERNS
+; =============================================================================
+
+; Test with partial mask
+define i32 @test_mbcnt_partial_mask() !reqd_work_group_size !1 {
+; GFX9-LABEL: define i32 @test_mbcnt_partial_mask(
+; GFX9-SAME: ) !reqd_work_group_size [[META1:![0-9]+]] {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
+; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX9-NEXT:    ret i32 [[B]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_partial_mask(
+; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
+; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX10-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; Test with non-zero base
+define i32 @test_mbcnt_non_zero_base() !reqd_work_group_size !1 {
+; GFX9-LABEL: define i32 @test_mbcnt_non_zero_base(
+; GFX9-SAME: ) !reqd_work_group_size [[META1]] {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX9-NEXT:    ret i32 [[B]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_non_zero_base(
+; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; GFX10-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; =============================================================================
+; ARCHITECTURE-SPECIFIC NEGATIVE CASES
+; =============================================================================
+
+; Test with work group size = partial wave multiple (48)
+define i32 @test_mbcnt_lo_wave32_partial_wave() !reqd_work_group_size !0 {
+; GFX9-LABEL: define i32 @test_mbcnt_lo_wave32_partial_wave(
+; GFX9-SAME: ) !reqd_work_group_size [[META0]] {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; GFX9-NEXT:    ret i32 [[A]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_lo_wave32_partial_wave(
+; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
+; GFX10-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; =============================================================================
+; COPY OPTIMIZATION NEGATIVE CASES
+; =============================================================================
+
+; Test with no work group size
+define i32 @test_mbcnt_hi_copy_no_wgs(i32 %val) {
+; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_no_wgs(
+; GFX9-SAME: i32 [[VAL:%.*]]) {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
+; GFX9-NEXT:    ret i32 [[RESULT]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_no_wgs(
+; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
+; GFX10-NEXT:    ret i32 [[RESULT]]
+;
+entry:
+  %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %val)
+  ret i32 %result
+}
+
+; Test with work group size = not a wave multiple (48)
+define i32 @test_mbcnt_hi_copy_non_wave_multiple(i32 %val) !reqd_work_group_size !0 {
+; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_non_wave_multiple(
+; GFX9-SAME: i32 [[VAL:%.*]]) !reqd_work_group_size [[META0]] {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
+; GFX9-NEXT:    ret i32 [[RESULT]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_non_wave_multiple(
+; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
+; GFX10-NEXT:    ret i32 [[RESULT]]
+;
+entry:
+  %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %val)
+  ret i32 %result
+}
+
+; Test with zero mask
+define i32 @test_mbcnt_hi_copy_zero_mask(i32 %val) !reqd_work_group_size !1 {
+; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_zero_mask(
+; GFX9-SAME: i32 [[VAL:%.*]]) !reqd_work_group_size [[META1]] {
+; GFX9-NEXT:  [[ENTRY:.*:]]
+; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 [[VAL]])
+; GFX9-NEXT:    ret i32 [[RESULT]]
+;
+; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_zero_mask(
+; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 [[VAL]])
+; GFX10-NEXT:    ret i32 [[RESULT]]
+;
+entry:
+  %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 %val)
+  ret i32 %result
+}
+
+; =============================================================================
+; METADATA
+; =============================================================================
+
+!0 = !{i32 48, i32 1, i32 1}   ; X=48 (not wave multiple)
+!1 = !{i32 64, i32 1, i32 1}   ; X=64 (wave64 or 2*wave32)
+
+; =============================================================================
+; FUNCTION DECLARATIONS
+; =============================================================================
+
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; GFX9: [[META0]] = !{i32 48, i32 1, i32 1}
+; GFX9: [[META1]] = !{i32 64, i32 1, i32 1}
+;.
+; GFX10: [[META0]] = !{i32 48, i32 1, i32 1}
+; GFX10: [[META1]] = !{i32 64, i32 1, i32 1}
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
deleted file mode 100644
index 470751c3c73f3..0000000000000
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-;; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
-
-define i32 @test_mbcnt_non_wave_size() !reqd_work_group_size !1 {
-; CHECK-LABEL: define i32 @test_mbcnt_non_wave_size(
-; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; CHECK-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; CHECK-NEXT:    ret i32 [[B]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
-  ret i32 %b
-}
-
-!1 = !{i32 48, i32 1, i32 1}
-
-; Declarations
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
-declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
-;.
-; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
-;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
deleted file mode 100644
index ad3993bbc30c1..0000000000000
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
-
-define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
-; CHECK-LABEL: define i32 @test_mbcnt_wave64_to_workitem(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; CHECK-NEXT:    [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    ret i32 [[B]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
-  ret i32 %b
-}
-
-!1 = !{i32 64, i32 1, i32 1}
-
-; Declarations
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
-declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
-declare i32 @llvm.amdgcn.workitem.id.x()
-;.
-; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
-;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
deleted file mode 100644
index af8d713b798ed..0000000000000
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
-
-define i32 @test_mbcnt_no_reqd_work_group_size() {
-; CHECK-LABEL: define i32 @test_mbcnt_no_reqd_work_group_size() {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; CHECK-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; CHECK-NEXT:    ret i32 [[B]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
-  ret i32 %b
-}
-
-; Declarations
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
-declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
-declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
deleted file mode 100644
index fe049e1627409..0000000000000
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
-
-define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
-; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; CHECK-NEXT:    [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    ret i32 [[B]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
-  ret i32 %b
-}
-
-!0 = !{i32 64, i32 1, i32 1}
-
-; Declarations
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
-declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
-declare i32 @llvm.amdgcn.workitem.id.x()
-;.
-; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
-;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
deleted file mode 100644
index 30514562556ea..0000000000000
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
-
-; Test that mbcnt.lo(~0, 0) IS optimized on wave32 with bitmask when work group size allows even wave distribution
-define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_matching_wgs(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
-; CHECK-NEXT:    ret i32 [[A]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
-; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when no reqd_work_group_size is specified
-define i32 @test_mbcnt_lo_wave32_no_wgs() {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_no_wgs(
-; CHECK-SAME: ) #[[ATTR0]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; CHECK-NEXT:    ret i32 [[A]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
-; Test that mbcnt.lo with non-all-ones first arg is NOT optimized
-define i32 @test_mbcnt_lo_wave32_partial_mask() !reqd_work_group_size !1 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_partial_mask(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
-; CHECK-NEXT:    ret i32 [[A]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
-  ret i32 %a
-}
-
-; Test that mbcnt.lo with non-zero second arg is NOT optimized
-define i32 @test_mbcnt_lo_wave32_non_zero_base() !reqd_work_group_size !1 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_zero_base(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
-; CHECK-NEXT:    ret i32 [[A]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
-  ret i32 %a
-}
-
-!0 = !{i32 48, i32 1, i32 1}  ; Work group size 48 != wave size 32
-!1 = !{i32 32, i32 1, i32 1}  ; Work group size 32 == wave size 32
-
-; Function Attrs: nounwind readnone speculatable willreturn
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
-
-attributes #0 = { nounwind readnone speculatable willreturn }
-;.
-; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
-; CHECK: [[META1]] = !{i32 32, i32 1, i32 1}
-;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
deleted file mode 100644
index 07a5028ca1ee5..0000000000000
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
-
-define i32 @test_mbcnt_lo_wave32() !reqd_work_group_size !0 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
-!0 = !{i32 32, i32 1, i32 1}
-
-; Function Attrs: nounwind readnone speculatable willreturn
-declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
-declare i32 @llvm.amdgcn.workitem.id.x() #0
-
-attributes #0 = { nounwind readnone speculatable willreturn }
-;.
-; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
-;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
new file mode 100644
index 0000000000000..975d3f295e9db
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+; Test mbcnt optimizations for wave32 architecture
+; Covers: simple replacement, bitmask optimization, copy optimization
+
+; =============================================================================
+; SIMPLE REPLACEMENT OPTIMIZATIONS (mbcnt.lo -> workitem.id.x)
+; =============================================================================
+
+; Test with work group size = wave size
+define i32 @test_mbcnt_lo_simple_wave32() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_simple_wave32(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; =============================================================================
+; BITMASK OPTIMIZATIONS (mbcnt.lo -> workitem.id.x & 0x1f)
+; =============================================================================
+
+; Test with work group size = 2 * wave size
+define i32 @test_mbcnt_lo_bitmask_64() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_64(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; Test with work group size = 4 * wave size
+define i32 @test_mbcnt_lo_bitmask_128() !reqd_work_group_size !3 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_128(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META2:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 128) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; Test with work group size = non-power-of-2 but wave-multiple
+define i32 @test_mbcnt_lo_bitmask_96() !reqd_work_group_size !2 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_96(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 96) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
+; =============================================================================
+; COPY OPTIMIZATION (mbcnt.hi(mask, val) -> val)
+; =============================================================================
+
+; Test with mask = wave32 range
+define i32 @test_mbcnt_hi_copy_basic(i32 %val) !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_hi_copy_basic(
+; CHECK-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    ret i32 [[VAL]]
+;
+entry:
+  %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %val)
+  ret i32 %result
+}
+
+; Test with partial mask
+define i32 @test_mbcnt_hi_copy_partial_mask(i32 %val) !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_hi_copy_partial_mask(
+; CHECK-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    ret i32 [[VAL]]
+;
+entry:
+  %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 65535, i32 %val)
+  ret i32 %result
+}
+
+; =============================================================================
+; FULL PATTERN OPTIMIZATION (mbcnt.hi(~0, mbcnt.lo(~0, 0)) -> workitem.id.x)
+; =============================================================================
+
+; Test full pattern on wave32
+define i32 @test_mbcnt_full_pattern_wave32() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave32(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; =============================================================================
+; METADATA
+; =============================================================================
+
+!0 = !{i32 32, i32 1, i32 1}   ; X=32 (1*wave32), Y=1, Z=1
+!1 = !{i32 64, i32 1, i32 1}   ; X=64 (2*wave32), Y=1, Z=1
+!2 = !{i32 96, i32 1, i32 1}   ; X=96 (3*wave32), Y=1, Z=1
+!3 = !{i32 128, i32 1, i32 1}  ; X=128 (4*wave32), Y=1, Z=1
+
+; =============================================================================
+; FUNCTION DECLARATIONS
+; =============================================================================
+
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 64, i32 1, i32 1}
+; CHECK: [[META2]] = !{i32 128, i32 1, i32 1}
+; CHECK: [[META3]] = !{i32 96, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
new file mode 100644
index 0000000000000..42ba27a37b094
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+; Test full mbcnt pattern optimization for wave64 architecture
+; Covers: full pattern optimization
+
+; =============================================================================
+; FULL PATTERN OPTIMIZATION - mbcnt.hi(~0, mbcnt.lo(~0, 0)) -> workitem.id.x
+; =============================================================================
+
+; Test with work group size = wave size (64)
+define i32 @test_mbcnt_full_pattern_wave64_basic() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_basic(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; Test with work group size = 2 * wave size (128)
+define i32 @test_mbcnt_full_pattern_wave64_128() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_128(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 128) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[TMP0]], 63
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; Test with work group size = 4 * wave size (256)
+define i32 @test_mbcnt_full_pattern_wave64_256() !reqd_work_group_size !2 {
+; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_256(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META2:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 256) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[TMP0]], 63
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; Test with multidimensional work group where X dimension matches pattern
+define i32 @test_mbcnt_full_pattern_wave64_multidim() !reqd_work_group_size !3 {
+; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_multidim(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
+; =============================================================================
+; METADATA
+; =============================================================================
+
+!0 = !{i32 64, i32 1, i32 1}   ; X=64 (1*wave), Y=1, Z=1
+!1 = !{i32 128, i32 1, i32 1}  ; X=128 (2*wave), Y=1, Z=1
+!2 = !{i32 256, i32 1, i32 1}  ; X=256 (4*wave), Y=1, Z=1
+!3 = !{i32 64, i32 2, i32 1}   ; X=64 (1*wave), Y=2, Z=1
+
+; =============================================================================
+; FUNCTION DECLARATIONS
+; =============================================================================
+
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 128, i32 1, i32 1}
+; CHECK: [[META2]] = !{i32 256, i32 1, i32 1}
+; CHECK: [[META3]] = !{i32 64, i32 2, i32 1}
+;.

>From f1eda3426a303fedcd1bed9138fc215df352f747 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Wed, 29 Oct 2025 11:08:54 +0530
Subject: [PATCH 07/14] Simplified the implementation per review suggestions -
 1. Remove unnecessary casts 2. Replace manual AND instruction injection with
 ReplaceInstWithValue utility 3. Replace manual pattern matching with match
 utility 4. Rearrange the logic to avoid redundancy 5. Add a check that the
 reqd work group size is a multiple of the wave size for full pattern
 optimization

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 108 +++++++-----------
 1 file changed, 42 insertions(+), 66 deletions(-)
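
Note for reviewers: a condensed sketch of what the simplified mbcnt.hi path
boils down to after this change (illustrative only; the real logic lives in
AMDGPUCodeGenPrepareImpl::visitMbcntHi below and carries the extra wave-size
and reqd_work_group_size guards shown in the diff; ST and I are the subtarget
and the visited intrinsic call, as in the pass):

  using namespace llvm::PatternMatch;

  // mbcnt.hi(~0, mbcnt.lo(~0, 0)) counts all lower-ID active lanes, i.e. it
  // computes the lane ID across the full execution mask.
  if (match(I.getArgOperand(0), m_AllOnes()) &&
      match(I.getArgOperand(1),
            m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(m_AllOnes(), m_Zero()))) {
    IRBuilder<> B(&I);
    CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
    // lane_id = workitem.id.x & (wave_size - 1), valid once the work group
    // is known to split evenly into power-of-2 waves.
    Value *LaneId = B.CreateAnd(
        Tid, ConstantInt::get(Tid->getType(), ST.getWavefrontSize() - 1));
    BasicBlock::iterator BI(&I);
    ReplaceInstWithValue(BI, LaneId); // RAUWs the call and erases it
  }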

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index dce4b0b1a4fff..d96a9b0b600c8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -35,6 +35,7 @@
 #include "llvm/Support/KnownFPClass.h"
 #include "llvm/Transforms/Utils/IntegerDivision.h"
 #include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
 
 #define DEBUG_TYPE "amdgpu-codegenprepare"
 
@@ -2080,6 +2081,10 @@ INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
 
 /// Optimize mbcnt.lo calls on wave32 architectures for lane ID computation.
 bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
+  // Abort if wave size is not known at compile time.
+  if (!ST.isWaveSizeKnown())
+    return false;
+
   // This optimization only applies to wave32 targets where mbcnt.lo operates on
   // the full execution mask.
   if (!ST.isWave32())
@@ -2092,10 +2097,6 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
   if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
     return false;
 
-  // Abort if wave size is not known at compile time.
-  if (!ST.isWaveSizeKnown())
-    return false;
-
   unsigned Wave = ST.getWavefrontSize();
 
   if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
@@ -2104,13 +2105,11 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
     // When XLen == wave_size, each work group contains exactly one wave, so
     // mbcnt.lo(~0, 0) directly equals the workitem ID within the group.
     if (XLen == Wave) {
-      IRBuilder<> B(&I);
-      CallInst *NewCall =
-          B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-      NewCall->takeName(&I);
+      Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(
+          I.getModule(), Intrinsic::amdgcn_workitem_id_x);
+      CallInst *NewCall = CallInst::Create(WorkitemIdFn, I.getName());
+      ReplaceInstWithInst(&I, NewCall);
       ST.makeLIDRangeMetadata(NewCall);
-      I.replaceAllUsesWith(NewCall);
-      I.eraseFromParent();
       return true;
     }
     // When work group evenly splits into waves and wave size is power-of-2,
@@ -2121,12 +2120,10 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
         IRBuilder<> B(&I);
         CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
         ST.makeLIDRangeMetadata(Tid);
-        IntegerType *ITy = cast<IntegerType>(Tid->getType());
-        Constant *Mask = ConstantInt::get(ITy, Wave - 1);
-        Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
-        AndInst->takeName(&I);
-        I.replaceAllUsesWith(AndInst);
-        I.eraseFromParent();
+        Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
+        Value *AndInst = B.CreateAnd(Tid, Mask);
+        BasicBlock::iterator BI(&I);
+        ReplaceInstWithValue(BI, AndInst);
         return true;
       }
     }
@@ -2137,23 +2134,24 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
 
 /// Optimize mbcnt.hi calls for lane ID computation.
 bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
-  // On wave32, the upper 32 bits of exec are always 0, so mbcnt.hi(mask, val)
-  // always returns val unchanged.
-  if (ST.isWave32()) {
-    // Abort if wave size is not known at compile time.
-    if (!ST.isWaveSizeKnown())
-      return false;
+  // Abort if wave size is not known at compile time.
+  if (!ST.isWaveSizeKnown())
+    return false;
 
-    unsigned Wave = ST.getWavefrontSize();
+  // Calculate wave size once at the beginning
+  unsigned Wave = ST.getWavefrontSize();
 
+  // On wave32, the upper 32 bits of execution mask are always 0, so
+  // mbcnt.hi(mask, val) always returns val unchanged.
+  if (ST.isWave32()) {
     if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
       unsigned XLen = *MaybeX;
 
       // Replace mbcnt.hi(mask, val) with val only when work group size matches
       // wave size (single wave per work group).
       if (XLen == Wave) {
-        I.replaceAllUsesWith(I.getArgOperand(1));
-        I.eraseFromParent();
+        BasicBlock::iterator BI(&I);
+        ReplaceInstWithValue(BI, I.getArgOperand(1));
         return true;
       }
     }
@@ -2162,63 +2160,42 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
   // Optimize the complete lane ID computation pattern:
   // mbcnt.hi(~0, mbcnt.lo(~0, 0)) which counts all active lanes with lower IDs
   // across the full execution mask.
-  auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
-  if (!HiArg1)
-    return false;
+  using namespace PatternMatch;
 
-  Function *CalledF = HiArg1->getCalledFunction();
-  if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
+  // Check for pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+  if (!match(I.getArgOperand(0), m_AllOnes()))
     return false;
 
-  // mbcnt.hi mask must be all-ones (count from upper 32 bits)
-  auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
-  if (!HiArg0C || !HiArg0C->isAllOnesValue())
+  if (!match(I.getArgOperand(1),
+             m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(m_AllOnes(), m_Zero())))
     return false;
 
-  // mbcnt.lo mask must be all-ones (mask=~0, all lanes) and base must be 0.
-  Value *Lo0 = HiArg1->getArgOperand(0);
-  Value *Lo1 = HiArg1->getArgOperand(1);
-  auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
-  auto *Lo1C = dyn_cast<ConstantInt>(Lo1);
-  if (!Lo0C || !Lo1C || !Lo0C->isAllOnesValue() || !Lo1C->isZero())
-    return false;
-
-  // Abort if wave size is not known at compile time.
-  if (!ST.isWaveSizeKnown())
-    return false;
-
-  unsigned Wave = ST.getWavefrontSize();
-
   if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
     unsigned XLen = *MaybeX;
 
     // When XLen == wave_size, each work group contains exactly one wave, so
     // lane_id = workitem.id.x.
     if (XLen == Wave) {
-      IRBuilder<> B(&I);
-      CallInst *NewCall =
-          B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-      NewCall->takeName(&I);
+      Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(
+          I.getModule(), Intrinsic::amdgcn_workitem_id_x);
+      CallInst *NewCall = CallInst::Create(WorkitemIdFn, I.getName());
+      ReplaceInstWithInst(&I, NewCall);
       ST.makeLIDRangeMetadata(NewCall);
-      I.replaceAllUsesWith(NewCall);
-      I.eraseFromParent();
       return true;
     }
     // When work group evenly splits into waves and wave size is power-of-2,
     // we can compute lane ID within wave using bit masking:
     // lane_id = workitem.id.x & (wave_size - 1).
     if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
-      if (isPowerOf2_32(Wave)) {
+      if (XLen % Wave == 0 && isPowerOf2_32(Wave)) {
         // Construct optimized sequence: workitem.id.x & (wave_size - 1)
         IRBuilder<> B(&I);
         CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
         ST.makeLIDRangeMetadata(Tid);
-        IntegerType *ITy = cast<IntegerType>(Tid->getType());
-        Constant *Mask = ConstantInt::get(ITy, Wave - 1);
-        Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
-        AndInst->takeName(&I);
-        I.replaceAllUsesWith(AndInst);
-        I.eraseFromParent();
+        Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
+        Value *AndInst = B.CreateAnd(Tid, Mask);
+        BasicBlock::iterator BI(&I);
+        ReplaceInstWithValue(BI, AndInst);
         return true;
       }
     }
@@ -2230,12 +2207,11 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
       unsigned XLen =
           mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
       if (XLen == Wave) {
-        IRBuilder<> B(&I);
-        CallInst *NewCall =
-            B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-        NewCall->takeName(&I);
-        I.replaceAllUsesWith(NewCall);
-        I.eraseFromParent();
+        Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(
+            I.getModule(), Intrinsic::amdgcn_workitem_id_x);
+        CallInst *NewCall = CallInst::Create(WorkitemIdFn, I.getName());
+        ReplaceInstWithInst(&I, NewCall);
+        ST.makeLIDRangeMetadata(NewCall);
         return true;
       }
     }

>From 0759c5b68902819c2d14235fbd13db57691fdd19 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Wed, 29 Oct 2025 14:12:33 +0530
Subject: [PATCH 08/14] Addressed review feedback - 1. Add const qualifiers to
 the mbcnt lo & hi visit functions 2. Reorganize partial wave size cases in
 the test files

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    |  17 ++-
 .../Transforms/AMDGPU/mbcnt-negative-cases.ll | 117 ++++--------------
 .../AMDGPU/mbcnt-wave32-optimizations.ll      |  58 +++++++--
 .../AMDGPU/mbcnt-wave64-optimizations.ll      |  18 +++
 4 files changed, 95 insertions(+), 115 deletions(-)
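
Note for reviewers: the const change is only a signature tweak documenting
that the visitors read the subtarget and function without mutating pass
state; the declarations go from

  bool visitMbcntLo(IntrinsicInst &I);
  bool visitMbcntHi(IntrinsicInst &I);

to

  bool visitMbcntLo(IntrinsicInst &I) const;
  bool visitMbcntHi(IntrinsicInst &I) const;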

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index d96a9b0b600c8..133e9963550c1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -253,8 +253,8 @@ class AMDGPUCodeGenPrepareImpl
   bool visitIntrinsicInst(IntrinsicInst &I);
   bool visitFMinLike(IntrinsicInst &I);
   bool visitSqrt(IntrinsicInst &I);
-  bool visitMbcntLo(IntrinsicInst &I);
-  bool visitMbcntHi(IntrinsicInst &I);
+  bool visitMbcntLo(IntrinsicInst &I) const;
+  bool visitMbcntHi(IntrinsicInst &I) const;
   bool run();
 };
 
@@ -2080,7 +2080,7 @@ INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                     false, false)
 
 /// Optimize mbcnt.lo calls on wave32 architectures for lane ID computation.
-bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
+bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) const {
   // Abort if wave size is not known at compile time.
   if (!ST.isWaveSizeKnown())
     return false;
@@ -2092,9 +2092,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
 
   // Only optimize the pattern mbcnt.lo(~0, 0) which counts active lanes with
   // lower IDs.
-  auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
-  auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
-  if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
+  if (!match(&I,
+             m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(m_AllOnes(), m_Zero())))
     return false;
 
   unsigned Wave = ST.getWavefrontSize();
@@ -2133,12 +2132,12 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
 }
 
 /// Optimize mbcnt.hi calls for lane ID computation.
-bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
+bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
   // Abort if wave size is not known at compile time.
   if (!ST.isWaveSizeKnown())
     return false;
 
-  // Calculate wave size once at the beginning
+  // Calculate wave size
   unsigned Wave = ST.getWavefrontSize();
 
   // On wave32, the upper 32 bits of execution mask are always 0, so
@@ -2187,7 +2186,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
     // we can compute lane ID within wave using bit masking:
     // lane_id = workitem.id.x & (wave_size - 1).
     if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
-      if (XLen % Wave == 0 && isPowerOf2_32(Wave)) {
+      if (isPowerOf2_32(Wave)) {
         // Construct optimized sequence: workitem.id.x & (wave_size - 1)
         IRBuilder<> B(&I);
         CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
index caa2438921a5b..758f76358c771 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck --check-prefixes=CHECK,GFX9 %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck --check-prefixes=CHECK,GFX9 %s
 ; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck --check-prefixes=CHECK,GFX10 %s
 
 ; Test negative cases where mbcnt optimizations should NOT be applied
@@ -10,7 +10,8 @@
 
 ; Test with no reqd_work_group_size
 define i32 @test_mbcnt_no_work_group_size() {
-; GFX9-LABEL: define i32 @test_mbcnt_no_work_group_size() {
+; GFX9-LABEL: define i32 @test_mbcnt_no_work_group_size(
+; GFX9-SAME: ) #[[ATTR0:[0-9]+]] {
 ; GFX9-NEXT:  [[ENTRY:.*:]]
 ; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
@@ -31,7 +32,8 @@ entry:
 
 ; Test mbcnt.lo with no work group size
 define i32 @test_mbcnt_lo_no_work_group_size() {
-; GFX9-LABEL: define i32 @test_mbcnt_lo_no_work_group_size() {
+; GFX9-LABEL: define i32 @test_mbcnt_lo_no_work_group_size(
+; GFX9-SAME: ) #[[ATTR0]] {
 ; GFX9-NEXT:  [[ENTRY:.*:]]
 ; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; GFX9-NEXT:    ret i32 [[A]]
@@ -47,68 +49,21 @@ entry:
   ret i32 %a
 }
 
-; =============================================================================
-; NON-WAVE-MULTIPLE WORK GROUP SIZES
-; =============================================================================
-
-; Test with work group size = not a wave multiple (48)
-define i32 @test_mbcnt_non_wave_multiple() !reqd_work_group_size !0 {
-; GFX9-LABEL: define i32 @test_mbcnt_non_wave_multiple(
-; GFX9-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX9-NEXT:    ret i32 [[B]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_non_wave_multiple(
-; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0:![0-9]+]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
-; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX10-NEXT:    ret i32 [[B]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
-  ret i32 %b
-}
-
-; Test mbcnt.lo with work group size = not a wave multiple (48)
-define i32 @test_mbcnt_lo_non_wave_multiple() !reqd_work_group_size !0 {
-; GFX9-LABEL: define i32 @test_mbcnt_lo_non_wave_multiple(
-; GFX9-SAME: ) !reqd_work_group_size [[META0]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; GFX9-NEXT:    ret i32 [[A]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_lo_non_wave_multiple(
-; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
-; GFX10-NEXT:    ret i32 [[A]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
 ; =============================================================================
 ; PARTIAL MASKS AND NON-STANDARD PATTERNS
 ; =============================================================================
 
 ; Test with partial mask
-define i32 @test_mbcnt_partial_mask() !reqd_work_group_size !1 {
+define i32 @test_mbcnt_partial_mask() !reqd_work_group_size !0 {
 ; GFX9-LABEL: define i32 @test_mbcnt_partial_mask(
-; GFX9-SAME: ) !reqd_work_group_size [[META1:![0-9]+]] {
+; GFX9-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
 ; GFX9-NEXT:  [[ENTRY:.*:]]
 ; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
 ; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
 ; GFX9-NEXT:    ret i32 [[B]]
 ;
 ; GFX10-LABEL: define i32 @test_mbcnt_partial_mask(
-; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; GFX10-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
 ; GFX10-NEXT:  [[ENTRY:.*:]]
 ; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
 ; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
@@ -121,16 +76,16 @@ entry:
 }
 
 ; Test with non-zero base
-define i32 @test_mbcnt_non_zero_base() !reqd_work_group_size !1 {
+define i32 @test_mbcnt_non_zero_base() !reqd_work_group_size !0 {
 ; GFX9-LABEL: define i32 @test_mbcnt_non_zero_base(
-; GFX9-SAME: ) !reqd_work_group_size [[META1]] {
+; GFX9-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
 ; GFX9-NEXT:  [[ENTRY:.*:]]
 ; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
 ; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
 ; GFX9-NEXT:    ret i32 [[B]]
 ;
 ; GFX10-LABEL: define i32 @test_mbcnt_non_zero_base(
-; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
 ; GFX10-NEXT:  [[ENTRY:.*:]]
 ; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
 ; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
@@ -142,30 +97,6 @@ entry:
   ret i32 %b
 }
 
-; =============================================================================
-; ARCHITECTURE-SPECIFIC NEGATIVE CASES
-; =============================================================================
-
-; Test with work group size = partial wave multiple (48)
-define i32 @test_mbcnt_lo_wave32_partial_wave() !reqd_work_group_size !0 {
-; GFX9-LABEL: define i32 @test_mbcnt_lo_wave32_partial_wave(
-; GFX9-SAME: ) !reqd_work_group_size [[META0]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; GFX9-NEXT:    ret i32 [[A]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_lo_wave32_partial_wave(
-; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
-; GFX10-NEXT:    ret i32 [[A]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
 ; =============================================================================
 ; COPY OPTIMIZATION NEGATIVE CASES
 ; =============================================================================
@@ -173,7 +104,7 @@ entry:
 ; Test with no work group size
 define i32 @test_mbcnt_hi_copy_no_wgs(i32 %val) {
 ; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_no_wgs(
-; GFX9-SAME: i32 [[VAL:%.*]]) {
+; GFX9-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] {
 ; GFX9-NEXT:  [[ENTRY:.*:]]
 ; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
 ; GFX9-NEXT:    ret i32 [[RESULT]]
@@ -190,15 +121,15 @@ entry:
 }
 
 ; Test with work group size = not a wave multiple (48)
-define i32 @test_mbcnt_hi_copy_non_wave_multiple(i32 %val) !reqd_work_group_size !0 {
+define i32 @test_mbcnt_hi_copy_non_wave_multiple(i32 %val) !reqd_work_group_size !1 {
 ; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_non_wave_multiple(
-; GFX9-SAME: i32 [[VAL:%.*]]) !reqd_work_group_size [[META0]] {
+; GFX9-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
 ; GFX9-NEXT:  [[ENTRY:.*:]]
 ; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
 ; GFX9-NEXT:    ret i32 [[RESULT]]
 ;
 ; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_non_wave_multiple(
-; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
+; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
 ; GFX10-NEXT:  [[ENTRY:.*:]]
 ; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
 ; GFX10-NEXT:    ret i32 [[RESULT]]
@@ -209,15 +140,15 @@ entry:
 }
 
 ; Test with zero mask
-define i32 @test_mbcnt_hi_copy_zero_mask(i32 %val) !reqd_work_group_size !1 {
+define i32 @test_mbcnt_hi_copy_zero_mask(i32 %val) !reqd_work_group_size !0 {
 ; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_zero_mask(
-; GFX9-SAME: i32 [[VAL:%.*]]) !reqd_work_group_size [[META1]] {
+; GFX9-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
 ; GFX9-NEXT:  [[ENTRY:.*:]]
 ; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 [[VAL]])
 ; GFX9-NEXT:    ret i32 [[RESULT]]
 ;
 ; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_zero_mask(
-; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
 ; GFX10-NEXT:  [[ENTRY:.*:]]
 ; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 [[VAL]])
 ; GFX10-NEXT:    ret i32 [[RESULT]]
@@ -231,8 +162,8 @@ entry:
 ; METADATA
 ; =============================================================================
 
-!0 = !{i32 48, i32 1, i32 1}   ; X=48 (not wave multiple)
-!1 = !{i32 64, i32 1, i32 1}   ; X=64 (wave64 or 2*wave32)
+!0 = !{i32 64, i32 1, i32 1}   ; X=64 (wave64 or 2*wave32)
+!1 = !{i32 48, i32 1, i32 1}   ; X=48 (not wave multiple)
 
 ; =============================================================================
 ; FUNCTION DECLARATIONS
@@ -243,11 +174,11 @@ declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
 
 attributes #0 = { nounwind readnone speculatable willreturn }
 ;.
-; GFX9: [[META0]] = !{i32 48, i32 1, i32 1}
-; GFX9: [[META1]] = !{i32 64, i32 1, i32 1}
+; GFX9: [[META0]] = !{i32 64, i32 1, i32 1}
+; GFX9: [[META1]] = !{i32 48, i32 1, i32 1}
 ;.
-; GFX10: [[META0]] = !{i32 48, i32 1, i32 1}
-; GFX10: [[META1]] = !{i32 64, i32 1, i32 1}
+; GFX10: [[META0]] = !{i32 64, i32 1, i32 1}
+; GFX10: [[META1]] = !{i32 48, i32 1, i32 1}
 ;.
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
index 975d3f295e9db..e2c683ffd5562 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
@@ -8,7 +8,7 @@
 ; SIMPLE REPLACEMENT OPTIMIZATIONS (mbcnt.lo -> workitem.id.x)
 ; =============================================================================
 
-; Test with work group size = wave size
+; Test with work group size = wave size (32)
 define i32 @test_mbcnt_lo_simple_wave32() !reqd_work_group_size !0 {
 ; CHECK-LABEL: define i32 @test_mbcnt_lo_simple_wave32(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
@@ -25,7 +25,7 @@ entry:
 ; BITMASK OPTIMIZATIONS (mbcnt.lo -> workitem.id.x & 0x1f)
 ; =============================================================================
 
-; Test with work group size = 2 * wave size
+; Test with work group size = 2 * wave size (64)
 define i32 @test_mbcnt_lo_bitmask_64() !reqd_work_group_size !1 {
 ; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_64(
 ; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
@@ -39,12 +39,12 @@ entry:
   ret i32 %a
 }
 
-; Test with work group size = 4 * wave size
-define i32 @test_mbcnt_lo_bitmask_128() !reqd_work_group_size !3 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_128(
+; Test with work group size = 3 * wave size (96)
+define i32 @test_mbcnt_lo_bitmask_96() !reqd_work_group_size !2 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_96(
 ; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META2:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 128) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 96) i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
@@ -53,12 +53,12 @@ entry:
   ret i32 %a
 }
 
-; Test with work group size = non-power-of-2 but wave-multiple
-define i32 @test_mbcnt_lo_bitmask_96() !reqd_work_group_size !2 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_96(
+; Test with work group size = 4 * wave size (128)
+define i32 @test_mbcnt_lo_bitmask_128() !reqd_work_group_size !3 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_128(
 ; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 96) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 128) i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
@@ -67,6 +67,20 @@ entry:
   ret i32 %a
 }
 
+; Test with work group size = 1.5 * wave size (48)
+define i32 @test_mbcnt_lo_bitmask_48() !reqd_work_group_size !4 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_48(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META4:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT:    ret i32 [[A]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  ret i32 %a
+}
+
 ; =============================================================================
 ; COPY OPTIMIZATION (mbcnt.hi(mask, val) -> val)
 ; =============================================================================
@@ -99,7 +113,7 @@ entry:
 ; FULL PATTERN OPTIMIZATION (mbcnt.hi(~0, mbcnt.lo(~0, 0)) -> workitem.id.x)
 ; =============================================================================
 
-; Test full pattern on wave32
+; Test with work group size = wave size (32)
 define i32 @test_mbcnt_full_pattern_wave32() !reqd_work_group_size !0 {
 ; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave32(
 ; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
@@ -113,6 +127,22 @@ entry:
   ret i32 %b
 }
 
+; Test with work group size = 1.5 * wave size (48)
+define i32 @test_mbcnt_full_pattern_wave32_partial() !reqd_work_group_size !4 {
+; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave32_partial(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META4]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
 ; =============================================================================
 ; METADATA
 ; =============================================================================
@@ -121,6 +151,7 @@ entry:
 !1 = !{i32 64, i32 1, i32 1}   ; X=64 (2*wave32), Y=1, Z=1
 !2 = !{i32 96, i32 1, i32 1}   ; X=96 (3*wave32), Y=1, Z=1
 !3 = !{i32 128, i32 1, i32 1}  ; X=128 (4*wave32), Y=1, Z=1
+!4 = !{i32 48, i32 1, i32 1}   ; X=48 (1.5*wave32), Y=1, Z=1
 
 ; =============================================================================
 ; FUNCTION DECLARATIONS
@@ -133,6 +164,7 @@ attributes #0 = { nounwind readnone speculatable willreturn }
 ;.
 ; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
 ; CHECK: [[META1]] = !{i32 64, i32 1, i32 1}
-; CHECK: [[META2]] = !{i32 128, i32 1, i32 1}
-; CHECK: [[META3]] = !{i32 96, i32 1, i32 1}
+; CHECK: [[META2]] = !{i32 96, i32 1, i32 1}
+; CHECK: [[META3]] = !{i32 128, i32 1, i32 1}
+; CHECK: [[META4]] = !{i32 48, i32 1, i32 1}
 ;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
index 42ba27a37b094..d70871b6eda3b 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
@@ -70,6 +70,22 @@ entry:
   ret i32 %b
 }
 
+; Test with work group size = 0.75 * wave size (48)
+define i32 @test_mbcnt_full_pattern_wave64_partial() !reqd_work_group_size !4 {
+; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_partial(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META4:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[TMP0]], 63
+; CHECK-NEXT:    ret i32 [[B]]
+;
+entry:
+  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+  ret i32 %b
+}
+
 ; =============================================================================
 ; METADATA
 ; =============================================================================
@@ -78,6 +94,7 @@ entry:
 !1 = !{i32 128, i32 1, i32 1}  ; X=128 (2*wave), Y=1, Z=1
 !2 = !{i32 256, i32 1, i32 1}  ; X=256 (4*wave), Y=1, Z=1
 !3 = !{i32 64, i32 2, i32 1}   ; X=64 (1*wave), Y=2, Z=1
+!4 = !{i32 48, i32 1, i32 1}   ; X=48 (0.75*wave), Y=1, Z=1
 
 ; =============================================================================
 ; FUNCTION DECLARATIONS
@@ -92,4 +109,5 @@ attributes #0 = { nounwind readnone speculatable willreturn }
 ; CHECK: [[META1]] = !{i32 128, i32 1, i32 1}
 ; CHECK: [[META2]] = !{i32 256, i32 1, i32 1}
 ; CHECK: [[META3]] = !{i32 64, i32 2, i32 1}
+; CHECK: [[META4]] = !{i32 48, i32 1, i32 1}
 ;.

>From e908b3e7bb653fce1f083357bc9c6123f4e9dcb9 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Wed, 29 Oct 2025 16:14:59 +0530
Subject: [PATCH 09/14] Remove redundant isPowerOf2_32 check and combine two
 match calls in visitMbcntHi

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 57 ++++++++-----------
 1 file changed, 25 insertions(+), 32 deletions(-)
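
Note for reviewers: the two separate match() queries collapse into a single
nested matcher; a minimal sketch of the combined form (same names as in the
diff):

  using namespace llvm::PatternMatch;

  // Matches mbcnt.hi(~0, mbcnt.lo(~0, 0)) in one query instead of checking
  // the outer all-ones mask and the inner mbcnt.lo call separately.
  bool IsLaneId =
      match(&I, m_Intrinsic<Intrinsic::amdgcn_mbcnt_hi>(
                    m_AllOnes(), m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(
                                     m_AllOnes(), m_Zero())));

The isPowerOf2_32(Wave) guard can go because the only wavefront sizes the
backend supports are 32 and 64, both powers of two.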

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 133e9963550c1..faf00ce35d703 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2111,20 +2111,18 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) const {
       ST.makeLIDRangeMetadata(NewCall);
       return true;
     }
-    // When work group evenly splits into waves and wave size is power-of-2,
-    // we can compute lane ID within wave using bit masking:
-    // lane_id = workitem.id.x & (wave_size - 1).
+    // When work group evenly splits into waves, we can compute lane ID within
+    // wave using bit masking: lane_id = workitem.id.x & (wave_size - 1).
     if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
-      if (isPowerOf2_32(Wave)) {
-        IRBuilder<> B(&I);
-        CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-        ST.makeLIDRangeMetadata(Tid);
-        Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
-        Value *AndInst = B.CreateAnd(Tid, Mask);
-        BasicBlock::iterator BI(&I);
-        ReplaceInstWithValue(BI, AndInst);
-        return true;
-      }
+      // Construct optimized sequence: workitem.id.x & (wave_size - 1)
+      IRBuilder<> B(&I);
+      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+      ST.makeLIDRangeMetadata(Tid);
+      Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
+      Value *AndInst = B.CreateAnd(Tid, Mask);
+      BasicBlock::iterator BI(&I);
+      ReplaceInstWithValue(BI, AndInst);
+      return true;
     }
   }
 
@@ -2162,11 +2160,9 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
   using namespace PatternMatch;
 
   // Check for pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
-  if (!match(I.getArgOperand(0), m_AllOnes()))
-    return false;
-
-  if (!match(I.getArgOperand(1),
-             m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(m_AllOnes(), m_Zero())))
+  if (!match(&I, m_Intrinsic<Intrinsic::amdgcn_mbcnt_hi>(
+                     m_AllOnes(), m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(
+                                      m_AllOnes(), m_Zero()))))
     return false;
 
   if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
@@ -2182,21 +2178,18 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
       ST.makeLIDRangeMetadata(NewCall);
       return true;
     }
-    // When work group evenly splits into waves and wave size is power-of-2,
-    // we can compute lane ID within wave using bit masking:
-    // lane_id = workitem.id.x & (wave_size - 1).
+    // When work group evenly splits into waves, we can compute lane ID within
+    // wave using bit masking: lane_id = workitem.id.x & (wave_size - 1).
     if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
-      if (isPowerOf2_32(Wave)) {
-        // Construct optimized sequence: workitem.id.x & (wave_size - 1)
-        IRBuilder<> B(&I);
-        CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-        ST.makeLIDRangeMetadata(Tid);
-        Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
-        Value *AndInst = B.CreateAnd(Tid, Mask);
-        BasicBlock::iterator BI(&I);
-        ReplaceInstWithValue(BI, AndInst);
-        return true;
-      }
+      // Construct optimized sequence: workitem.id.x & (wave_size - 1)
+      IRBuilder<> B(&I);
+      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+      ST.makeLIDRangeMetadata(Tid);
+      Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
+      Value *AndInst = B.CreateAnd(Tid, Mask);
+      BasicBlock::iterator BI(&I);
+      ReplaceInstWithValue(BI, AndInst);
+      return true;
     }
   } else {
     // When ST.getReqdWorkGroupSize() fails, use metadata. And only optimize the

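For reference, the combined matcher above leans on PatternMatch composition: the inner m_Intrinsic matcher is applied to the second operand of the outer call, so a single match() now validates both intrinsics and all four constant operands at once. A minimal standalone sketch of the idiom (the helper name isFullLaneIdPattern is illustrative, not part of the patch):

    #include "llvm/IR/IntrinsicsAMDGPU.h"
    #include "llvm/IR/PatternMatch.h"

    using namespace llvm;
    using namespace llvm::PatternMatch;

    // Matches mbcnt.hi(~0, mbcnt.lo(~0, 0)) in one expression; equivalent
    // to the two separate match() calls this patch removes.
    static bool isFullLaneIdPattern(Value *V) {
      return match(V, m_Intrinsic<Intrinsic::amdgcn_mbcnt_hi>(
                          m_AllOnes(), m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(
                                           m_AllOnes(), m_Zero())));
    }
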
>From 7708c9bd0316fef33759f9c9aabc8826388df299 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Wed, 29 Oct 2025 16:31:29 +0530
Subject: [PATCH 10/14] Remove the 4*wave-size test cases

---
 .../AMDGPU/mbcnt-wave32-optimizations.ll      | 28 ++++-----------
 .../AMDGPU/mbcnt-wave64-optimizations.ll      | 34 +++++--------------
 2 files changed, 14 insertions(+), 48 deletions(-)

diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
index e2c683ffd5562..1ec25da09d6a1 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
@@ -53,24 +53,10 @@ entry:
   ret i32 %a
 }
 
-; Test with work group size = 4 * wave size (128)
-define i32 @test_mbcnt_lo_bitmask_128() !reqd_work_group_size !3 {
-; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_128(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 128) i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], 31
-; CHECK-NEXT:    ret i32 [[TMP1]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  ret i32 %a
-}
-
 ; Test with work group size = 0.75 * wave size (48)
-define i32 @test_mbcnt_lo_bitmask_48() !reqd_work_group_size !4 {
+define i32 @test_mbcnt_lo_bitmask_48() !reqd_work_group_size !3 {
 ; CHECK-LABEL: define i32 @test_mbcnt_lo_bitmask_48(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META4:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[A:%.*]] = and i32 [[TMP0]], 31
@@ -128,9 +114,9 @@ entry:
 }
 
 ; Test with work group size = 0.75 * wave size (48)
-define i32 @test_mbcnt_full_pattern_wave32_partial() !reqd_work_group_size !4 {
+define i32 @test_mbcnt_full_pattern_wave32_partial() !reqd_work_group_size !3 {
 ; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave32_partial(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META4]] {
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
@@ -150,8 +136,7 @@ entry:
 !0 = !{i32 32, i32 1, i32 1}   ; X=32 (1*wave32), Y=1, Z=1
 !1 = !{i32 64, i32 1, i32 1}   ; X=64 (2*wave32), Y=1, Z=1
 !2 = !{i32 96, i32 1, i32 1}   ; X=96 (3*wave32), Y=1, Z=1
-!3 = !{i32 128, i32 1, i32 1}  ; X=128 (4*wave32), Y=1, Z=1
-!4 = !{i32 48, i32 1, i32 1}   ; X=48 (1.5*wave32), Y=1, Z=1
+!3 = !{i32 48, i32 1, i32 1}   ; X=48 (1.5*wave32), Y=1, Z=1
 
 ; =============================================================================
 ; FUNCTION DECLARATIONS
@@ -165,6 +150,5 @@ attributes #0 = { nounwind readnone speculatable willreturn }
 ; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
 ; CHECK: [[META1]] = !{i32 64, i32 1, i32 1}
 ; CHECK: [[META2]] = !{i32 96, i32 1, i32 1}
-; CHECK: [[META3]] = !{i32 128, i32 1, i32 1}
-; CHECK: [[META4]] = !{i32 48, i32 1, i32 1}
+; CHECK: [[META3]] = !{i32 48, i32 1, i32 1}
 ;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
index d70871b6eda3b..595ca0026cf3a 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
@@ -39,26 +39,10 @@ entry:
   ret i32 %b
 }
 
-; Test with work group size = 4 * wave size (256)
-define i32 @test_mbcnt_full_pattern_wave64_256() !reqd_work_group_size !2 {
-; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_256(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META2:![0-9]+]] {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 256) i32 @llvm.amdgcn.workitem.id.x()
-; CHECK-NEXT:    [[B:%.*]] = and i32 [[TMP0]], 63
-; CHECK-NEXT:    ret i32 [[B]]
-;
-entry:
-  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
-  ret i32 %b
-}
-
 ; Test with multidimensional work group where X dimension matches pattern
-define i32 @test_mbcnt_full_pattern_wave64_multidim() !reqd_work_group_size !3 {
+define i32 @test_mbcnt_full_pattern_wave64_multidim() !reqd_work_group_size !2 {
 ; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_multidim(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META2:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
@@ -71,9 +55,9 @@ entry:
 }
 
 ; Test with work group size = 0.75 * wave size (48)
-define i32 @test_mbcnt_full_pattern_wave64_partial() !reqd_work_group_size !4 {
+define i32 @test_mbcnt_full_pattern_wave64_partial() !reqd_work_group_size !3 {
 ; CHECK-LABEL: define i32 @test_mbcnt_full_pattern_wave64_partial(
-; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META4:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META3:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; CHECK-NEXT:    [[TMP0:%.*]] = call range(i32 0, 48) i32 @llvm.amdgcn.workitem.id.x()
@@ -92,9 +76,8 @@ entry:
 
 !0 = !{i32 64, i32 1, i32 1}   ; X=64 (1*wave), Y=1, Z=1
 !1 = !{i32 128, i32 1, i32 1}  ; X=128 (2*wave), Y=1, Z=1
-!2 = !{i32 256, i32 1, i32 1}  ; X=256 (4*wave), Y=1, Z=1
-!3 = !{i32 64, i32 2, i32 1}   ; X=64 (1*wave), Y=2, Z=1
-!4 = !{i32 48, i32 1, i32 1}   ; X=48 (0.75*wave), Y=1, Z=1
+!2 = !{i32 64, i32 2, i32 1}   ; X=64 (1*wave), Y=2, Z=1
+!3 = !{i32 48, i32 1, i32 1}   ; X=48 (0.75*wave), Y=1, Z=1
 
 ; =============================================================================
 ; FUNCTION DECLARATIONS
@@ -107,7 +90,6 @@ attributes #0 = { nounwind readnone speculatable willreturn }
 ;.
 ; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
 ; CHECK: [[META1]] = !{i32 128, i32 1, i32 1}
-; CHECK: [[META2]] = !{i32 256, i32 1, i32 1}
-; CHECK: [[META3]] = !{i32 64, i32 2, i32 1}
-; CHECK: [[META4]] = !{i32 48, i32 1, i32 1}
+; CHECK: [[META2]] = !{i32 64, i32 2, i32 1}
+; CHECK: [[META3]] = !{i32 48, i32 1, i32 1}
 ;.

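Dropping the 128- and 256-thread cases loses no coverage: once the X dimension is any whole multiple of the (power-of-two) wave size, the bitmask rewrite takes the same path as the retained 2*wave and 3*wave cases, and the masking identity itself does not depend on the multiple. A standalone check of that arithmetic (plain C++, independent of LLVM; a sketch only):

    #include <cassert>

    int main() {
      // For a power-of-two wave size, masking with (Wave - 1) is exactly
      // tid % Wave, whatever multiple of Wave the work-group size is.
      for (unsigned Wave : {32u, 64u})
        for (unsigned Tid = 0; Tid < 4 * Wave; ++Tid)
          assert((Tid & (Wave - 1)) == Tid % Wave);
      return 0;
    }
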
>From 9863672badd3b90ae206fa6d0d8dcc91209e2278 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Fri, 31 Oct 2025 12:15:14 +0530
Subject: [PATCH 11/14] Simplify the negative-cases test file now that the
 arch-specific cases live in separate files

---
 .../Transforms/AMDGPU/mbcnt-negative-cases.ll | 131 ++++++------------
 1 file changed, 39 insertions(+), 92 deletions(-)

diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
index 758f76358c771..0b9000138e5b3 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck --check-prefixes=CHECK,GFX9 %s
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck --check-prefixes=CHECK,GFX10 %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
 
 ; Test negative cases where mbcnt optimizations should NOT be applied
 
@@ -10,19 +9,11 @@
 
 ; Test with no reqd_work_group_size
 define i32 @test_mbcnt_no_work_group_size() {
-; GFX9-LABEL: define i32 @test_mbcnt_no_work_group_size(
-; GFX9-SAME: ) #[[ATTR0:[0-9]+]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX9-NEXT:    ret i32 [[B]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_no_work_group_size(
-; GFX10-SAME: ) #[[ATTR0:[0-9]+]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX10-NEXT:    ret i32 [[B]]
+; CHECK-LABEL: define i32 @test_mbcnt_no_work_group_size() {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT:    ret i32 [[B]]
 ;
 entry:
   %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
@@ -32,17 +23,10 @@ entry:
 
 ; Test mbcnt.lo with no work group size
 define i32 @test_mbcnt_lo_no_work_group_size() {
-; GFX9-LABEL: define i32 @test_mbcnt_lo_no_work_group_size(
-; GFX9-SAME: ) #[[ATTR0]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; GFX9-NEXT:    ret i32 [[A]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_lo_no_work_group_size(
-; GFX10-SAME: ) #[[ATTR0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; GFX10-NEXT:    ret i32 [[A]]
+; CHECK-LABEL: define i32 @test_mbcnt_lo_no_work_group_size() {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT:    ret i32 [[A]]
 ;
 entry:
   %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
@@ -55,19 +39,12 @@ entry:
 
 ; Test with partial mask
 define i32 @test_mbcnt_partial_mask() !reqd_work_group_size !0 {
-; GFX9-LABEL: define i32 @test_mbcnt_partial_mask(
-; GFX9-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
-; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX9-NEXT:    ret i32 [[B]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_partial_mask(
-; GFX10-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
-; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX10-NEXT:    ret i32 [[B]]
+; CHECK-LABEL: define i32 @test_mbcnt_partial_mask(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
+; CHECK-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT:    ret i32 [[B]]
 ;
 entry:
   %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 65535, i32 0)
@@ -77,19 +54,12 @@ entry:
 
 ; Test with non-zero base
 define i32 @test_mbcnt_non_zero_base() !reqd_work_group_size !0 {
-; GFX9-LABEL: define i32 @test_mbcnt_non_zero_base(
-; GFX9-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
-; GFX9-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX9-NEXT:    ret i32 [[B]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_non_zero_base(
-; GFX10-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
-; GFX10-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
-; GFX10-NEXT:    ret i32 [[B]]
+; CHECK-LABEL: define i32 @test_mbcnt_non_zero_base(
+; CHECK-SAME: ) !reqd_work_group_size [[META0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+; CHECK-NEXT:    [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT:    ret i32 [[B]]
 ;
 entry:
   %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
@@ -103,17 +73,11 @@ entry:
 
 ; Test with no work group size
 define i32 @test_mbcnt_hi_copy_no_wgs(i32 %val) {
-; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_no_wgs(
-; GFX9-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
-; GFX9-NEXT:    ret i32 [[RESULT]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_no_wgs(
-; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
-; GFX10-NEXT:    ret i32 [[RESULT]]
+; CHECK-LABEL: define i32 @test_mbcnt_hi_copy_no_wgs(
+; CHECK-SAME: i32 [[VAL:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RESULT]]
 ;
 entry:
   %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %val)
@@ -122,17 +86,11 @@ entry:
 
 ; Test with work group size = not a wave multiple (48)
 define i32 @test_mbcnt_hi_copy_non_wave_multiple(i32 %val) !reqd_work_group_size !1 {
-; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_non_wave_multiple(
-; GFX9-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
-; GFX9-NEXT:    ret i32 [[RESULT]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_non_wave_multiple(
-; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
-; GFX10-NEXT:    ret i32 [[RESULT]]
+; CHECK-LABEL: define i32 @test_mbcnt_hi_copy_non_wave_multiple(
+; CHECK-SAME: i32 [[VAL:%.*]]) !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RESULT]]
 ;
 entry:
   %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %val)
@@ -141,17 +99,11 @@ entry:
 
 ; Test with zero mask
 define i32 @test_mbcnt_hi_copy_zero_mask(i32 %val) !reqd_work_group_size !0 {
-; GFX9-LABEL: define i32 @test_mbcnt_hi_copy_zero_mask(
-; GFX9-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
-; GFX9-NEXT:  [[ENTRY:.*:]]
-; GFX9-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 [[VAL]])
-; GFX9-NEXT:    ret i32 [[RESULT]]
-;
-; GFX10-LABEL: define i32 @test_mbcnt_hi_copy_zero_mask(
-; GFX10-SAME: i32 [[VAL:%.*]]) #[[ATTR0]] !reqd_work_group_size [[META0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 [[VAL]])
-; GFX10-NEXT:    ret i32 [[RESULT]]
+; CHECK-LABEL: define i32 @test_mbcnt_hi_copy_zero_mask(
+; CHECK-SAME: i32 [[VAL:%.*]]) !reqd_work_group_size [[META0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 [[VAL]])
+; CHECK-NEXT:    ret i32 [[RESULT]]
 ;
 entry:
   %result = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 %val)
@@ -174,11 +126,6 @@ declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
 
 attributes #0 = { nounwind readnone speculatable willreturn }
 ;.
-; GFX9: [[META0]] = !{i32 64, i32 1, i32 1}
-; GFX9: [[META1]] = !{i32 48, i32 1, i32 1}
-;.
-; GFX10: [[META0]] = !{i32 64, i32 1, i32 1}
-; GFX10: [[META1]] = !{i32 48, i32 1, i32 1}
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 48, i32 1, i32 1}
 ;.
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}

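The remaining negative cases map one-to-one onto the matcher's operands: a partial mask (65535) fails m_AllOnes(), and a non-zero base (5) fails m_Zero(). Semantically that is the right call, since with either deviation mbcnt no longer computes the lane ID. A scalar model of mbcnt.lo making that concrete (a sketch of the semantics as commonly described, not the authoritative ISA definition):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // base + popcount(mask & lanes-strictly-below); valid for lanes 0..31.
    static uint32_t mbcnt_lo(uint32_t mask, uint32_t base, unsigned lane) {
      uint32_t below = (1u << lane) - 1;
      return base + std::popcount(mask & below);
    }

    int main() {
      assert(mbcnt_lo(~0u, 0, 20) == 20);     // full mask: the lane ID
      assert(mbcnt_lo(0xFFFFu, 0, 20) == 16); // partial mask: not the lane ID
      assert(mbcnt_lo(~0u, 5, 20) == 25);     // non-zero base: offset lane ID
      return 0;
    }
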
>From 1e5ccac9e3c43362b131493f15edcdfc224ced0f Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 3 Nov 2025 11:54:52 +0530
Subject: [PATCH 12/14] Address review feedback: 1. Use quoted style for
 header inclusion 2. Eliminate redundant logic 3. Use CreateIntrinsic in
 place of getOrInsertDeclaration 4. Change the target triple in test cases

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 42 +++++--------------
 .../Transforms/AMDGPU/mbcnt-negative-cases.ll |  2 +-
 .../AMDGPU/mbcnt-wave32-optimizations.ll      |  2 +-
 .../AMDGPU/mbcnt-wave64-optimizations.ll      |  2 +-
 4 files changed, 14 insertions(+), 34 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index faf00ce35d703..833433aef664b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -35,7 +35,7 @@
 #include "llvm/Support/KnownFPClass.h"
 #include "llvm/Transforms/Utils/IntegerDivision.h"
 #include "llvm/Transforms/Utils/Local.h"
-#include <llvm/Transforms/Utils/BasicBlockUtils.h>
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
 
 #define DEBUG_TYPE "amdgpu-codegenprepare"
 
@@ -2081,10 +2081,6 @@ INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
 
 /// Optimize mbcnt.lo calls on wave32 architectures for lane ID computation.
 bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) const {
-  // Abort if wave size is not known at compile time.
-  if (!ST.isWaveSizeKnown())
-    return false;
-
   // This optimization only applies to wave32 targets where mbcnt.lo operates on
   // the full execution mask.
   if (!ST.isWave32())
@@ -2104,11 +2100,11 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) const {
     // When XLen == wave_size, each work group contains exactly one wave, so
     // mbcnt.lo(~0, 0) directly equals the workitem ID within the group.
     if (XLen == Wave) {
-      Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(
-          I.getModule(), Intrinsic::amdgcn_workitem_id_x);
-      CallInst *NewCall = CallInst::Create(WorkitemIdFn, I.getName());
-      ReplaceInstWithInst(&I, NewCall);
-      ST.makeLIDRangeMetadata(NewCall);
+      IRBuilder<> B(&I);
+      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+      ST.makeLIDRangeMetadata(Tid);
+      BasicBlock::iterator BI(&I);
+      ReplaceInstWithValue(BI, Tid);
       return true;
     }
     // When work group evenly splits into waves, we can compute lane ID within
@@ -2171,11 +2167,11 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
     // When XLen == wave_size, each work group contains exactly one wave, so
     // lane_id = workitem.id.x.
     if (XLen == Wave) {
-      Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(
-          I.getModule(), Intrinsic::amdgcn_workitem_id_x);
-      CallInst *NewCall = CallInst::Create(WorkitemIdFn, I.getName());
-      ReplaceInstWithInst(&I, NewCall);
-      ST.makeLIDRangeMetadata(NewCall);
+      IRBuilder<> B(&I);
+      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+      ST.makeLIDRangeMetadata(Tid);
+      BasicBlock::iterator BI(&I);
+      ReplaceInstWithValue(BI, Tid);
       return true;
     }
     // When work group evenly splits into waves, we can compute lane ID within
@@ -2191,22 +2187,6 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
       ReplaceInstWithValue(BI, AndInst);
       return true;
     }
-  } else {
-    // When ST.getReqdWorkGroupSize() fails, use metadata. And only optimize the
-    // case when work group size = wave size.
-    const MDNode *Node = F.getMetadata("reqd_work_group_size");
-    if (Node && Node->getNumOperands() == 3) {
-      unsigned XLen =
-          mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
-      if (XLen == Wave) {
-        Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(
-            I.getModule(), Intrinsic::amdgcn_workitem_id_x);
-        CallInst *NewCall = CallInst::Create(WorkitemIdFn, I.getName());
-        ReplaceInstWithInst(&I, NewCall);
-        ST.makeLIDRangeMetadata(NewCall);
-        return true;
-      }
-    }
   }
 
   return false;
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
index 0b9000138e5b3..36c7cfd388bf8 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-negative-cases.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
 
 ; Test negative cases where mbcnt optimizations should NOT be applied
 
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
index 1ec25da09d6a1..e0afa3e876ec2 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave32-optimizations.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
 
 ; Test mbcnt optimizations for wave32 architecture
 ; Covers: simple replacement, bitmask optimization, copy optimization
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
index 595ca0026cf3a..3da06b6692ecc 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-wave64-optimizations.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
 
 ; Test full mbcnt pattern optimization for wave64 architecture
 ; Covers: full pattern optimization

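On the CreateIntrinsic change: the IRBuilder route collapses declaration lookup, call creation, and instruction replacement into two calls, and keeps insertion-point handling inside the builder. The new shape, reduced to a standalone sketch (the pass context and the rewriteToWorkitemIdX name are assumed for illustration; range metadata is attached separately in the real pass):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/IntrinsicsAMDGPU.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"

    using namespace llvm;

    // Replace `I` (an mbcnt call known to equal the lane ID) with a call
    // to workitem.id.x inserted right before it.
    static void rewriteToWorkitemIdX(Instruction &I) {
      IRBuilder<> B(&I);
      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
      BasicBlock::iterator BI(&I);
      ReplaceInstWithValue(BI, Tid); // RAUWs I's uses, then erases I
    }
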
>From db1b89341aca28aad005873cb17f9fbd30934554 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 10 Nov 2025 14:07:27 +0530
Subject: [PATCH 13/14] Fix include-ordering format issue in AMDGPUCodeGenPrepare.cpp

---
 llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 833433aef664b..23a02f0221f86 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -33,9 +33,9 @@
 #include "llvm/Pass.h"
 #include "llvm/Support/KnownBits.h"
 #include "llvm/Support/KnownFPClass.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/IntegerDivision.h"
 #include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
 
 #define DEBUG_TYPE "amdgpu-codegenprepare"
 

>From 4938371dcdad58aae056a7459f52ea8692cf8937 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Thu, 13 Nov 2025 14:23:43 +0530
Subject: [PATCH 14/14] Refactor the code to remove redundant logic

---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    | 124 +++++++++---------
 1 file changed, 60 insertions(+), 64 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 23a02f0221f86..722e8022401d9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -238,6 +238,11 @@ class AMDGPUCodeGenPrepareImpl
   Value *emitSqrtIEEE2ULP(IRBuilder<> &Builder, Value *Src,
                           FastMathFlags FMF) const;
 
+  CallInst *createWorkitemIdX(IRBuilder<> &B) const;
+  void replaceWithWorkitemIdX(Instruction &I) const;
+  void replaceWithMaskedWorkitemIdX(Instruction &I, unsigned WaveSize) const;
+  bool tryReplaceWithWorkitemId(Instruction &I, unsigned Wave) const;
+
   bool tryNarrowMathIfNoOverflow(Instruction *I);
 
 public:
@@ -2079,6 +2084,58 @@ INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
 INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                     false, false)
 
+/// Create a workitem.id.x intrinsic call with range metadata.
+CallInst *AMDGPUCodeGenPrepareImpl::createWorkitemIdX(IRBuilder<> &B) const {
+  CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+  ST.makeLIDRangeMetadata(Tid);
+  return Tid;
+}
+
+/// Replace the instruction with a direct workitem.id.x call.
+void AMDGPUCodeGenPrepareImpl::replaceWithWorkitemIdX(Instruction &I) const {
+  IRBuilder<> B(&I);
+  CallInst *Tid = createWorkitemIdX(B);
+  BasicBlock::iterator BI(&I);
+  ReplaceInstWithValue(BI, Tid);
+}
+
+/// Replace the instruction with (workitem.id.x & mask).
+void AMDGPUCodeGenPrepareImpl::replaceWithMaskedWorkitemIdX(
+    Instruction &I, unsigned WaveSize) const {
+  IRBuilder<> B(&I);
+  CallInst *Tid = createWorkitemIdX(B);
+  Constant *Mask = ConstantInt::get(Tid->getType(), WaveSize - 1);
+  Value *AndInst = B.CreateAnd(Tid, Mask);
+  BasicBlock::iterator BI(&I);
+  ReplaceInstWithValue(BI, AndInst);
+}
+
+/// Try to optimize mbcnt instruction by replacing with workitem.id.x when
+/// work group size allows direct computation of lane ID.
+/// Returns true if optimization was applied, false otherwise.
+bool AMDGPUCodeGenPrepareImpl::tryReplaceWithWorkitemId(Instruction &I,
+                                                        unsigned Wave) const {
+  std::optional<unsigned> MaybeX = ST.getReqdWorkGroupSize(F, 0);
+  if (!MaybeX)
+    return false;
+
+  // When work group size == wave_size, each work group contains exactly one
+  // wave, so the instruction can be replaced with workitem.id.x directly.
+  if (*MaybeX == Wave) {
+    replaceWithWorkitemIdX(I);
+    return true;
+  }
+
+  // When work group evenly splits into waves, compute lane ID within wave
+  // using bit masking: lane_id = workitem.id.x & (wave_size - 1).
+  if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
+    replaceWithMaskedWorkitemIdX(I, Wave);
+    return true;
+  }
+
+  return false;
+}
+
 /// Optimize mbcnt.lo calls on wave32 architectures for lane ID computation.
 bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) const {
   // This optimization only applies to wave32 targets where mbcnt.lo operates on
@@ -2092,37 +2149,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) const {
              m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(m_AllOnes(), m_Zero())))
     return false;
 
-  unsigned Wave = ST.getWavefrontSize();
-
-  if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
-    unsigned XLen = *MaybeX;
-
-    // When XLen == wave_size, each work group contains exactly one wave, so
-    // mbcnt.lo(~0, 0) directly equals the workitem ID within the group.
-    if (XLen == Wave) {
-      IRBuilder<> B(&I);
-      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-      ST.makeLIDRangeMetadata(Tid);
-      BasicBlock::iterator BI(&I);
-      ReplaceInstWithValue(BI, Tid);
-      return true;
-    }
-    // When work group evenly splits into waves, we can compute lane ID within
-    // wave using bit masking: lane_id = workitem.id.x & (wave_size - 1).
-    if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
-      // Construct optimized sequence: workitem.id.x & (wave_size - 1)
-      IRBuilder<> B(&I);
-      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-      ST.makeLIDRangeMetadata(Tid);
-      Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
-      Value *AndInst = B.CreateAnd(Tid, Mask);
-      BasicBlock::iterator BI(&I);
-      ReplaceInstWithValue(BI, AndInst);
-      return true;
-    }
-  }
-
-  return false;
+  return tryReplaceWithWorkitemId(I, ST.getWavefrontSize());
 }
 
 /// Optimize mbcnt.hi calls for lane ID computation.
@@ -2131,18 +2158,15 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
   if (!ST.isWaveSizeKnown())
     return false;
 
-  // Calculate wave size
   unsigned Wave = ST.getWavefrontSize();
 
   // On wave32, the upper 32 bits of execution mask are always 0, so
   // mbcnt.hi(mask, val) always returns val unchanged.
   if (ST.isWave32()) {
     if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
-      unsigned XLen = *MaybeX;
-
       // Replace mbcnt.hi(mask, val) with val only when work group size matches
       // wave size (single wave per work group).
-      if (XLen == Wave) {
+      if (*MaybeX == Wave) {
         BasicBlock::iterator BI(&I);
         ReplaceInstWithValue(BI, I.getArgOperand(1));
         return true;
@@ -2161,35 +2185,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
                                       m_AllOnes(), m_Zero()))))
     return false;
 
-  if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
-    unsigned XLen = *MaybeX;
-
-    // When XLen == wave_size, each work group contains exactly one wave, so
-    // lane_id = workitem.id.x.
-    if (XLen == Wave) {
-      IRBuilder<> B(&I);
-      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-      ST.makeLIDRangeMetadata(Tid);
-      BasicBlock::iterator BI(&I);
-      ReplaceInstWithValue(BI, Tid);
-      return true;
-    }
-    // When work group evenly splits into waves, we can compute lane ID within
-    // wave using bit masking: lane_id = workitem.id.x & (wave_size - 1).
-    if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
-      // Construct optimized sequence: workitem.id.x & (wave_size - 1)
-      IRBuilder<> B(&I);
-      CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
-      ST.makeLIDRangeMetadata(Tid);
-      Constant *Mask = ConstantInt::get(Tid->getType(), Wave - 1);
-      Value *AndInst = B.CreateAnd(Tid, Mask);
-      BasicBlock::iterator BI(&I);
-      ReplaceInstWithValue(BI, AndInst);
-      return true;
-    }
-  }
-
-  return false;
+  return tryReplaceWithWorkitemId(I, Wave);
 }
 
 char AMDGPUCodeGenPrepare::ID = 0;

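One last piece of background for the wave32 copy case retained in visitMbcntHi: on wave32 the upper 32 bits of the execution mask are always zero, so mbcnt.hi contributes nothing and simply forwards its second operand. A scalar model checking that (again a sketch of the semantics as I understand them, not the ISA definition):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // base + popcount(mask & upper-half-of-lanes-below); lanes 0..31 only.
    static uint32_t mbcnt_hi(uint32_t mask, uint32_t base, unsigned lane) {
      uint64_t below = ((uint64_t)1 << lane) - 1; // lanes strictly below
      uint32_t hi = (uint32_t)(below >> 32);      // always 0 when lane < 32
      return base + std::popcount(mask & hi);
    }

    int main() {
      for (unsigned lane = 0; lane < 32; ++lane)
        assert(mbcnt_hi(~0u, lane, lane) == lane); // acts as a copy on wave32
      return 0;
    }
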


More information about the llvm-commits mailing list