[llvm] [AMDGPU]: Rewrite mbcnt_lo/mbcnt_hi to work item ID where applicable (PR #160496)
Teja Alaghari via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 3 00:50:51 PDT 2025
https://github.com/TejaX-Alaghari updated https://github.com/llvm/llvm-project/pull/160496
>From de4358abbb37adbc4a7a72b4008b35ef4a7e9a5e Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 29 Sep 2025 13:48:07 +0530
Subject: [PATCH 1/5] [AMDGPU] Move mbcnt optimization from InstCombine to
AMDGPUCodeGenPrepare
This addresses reviewer concerns about pipeline timing by moving the mbcnt
optimization from InstCombine to AMDGPUCodeGenPrepare. InstCombine runs before
the AMDGPU Attributor, so reqd_work_group_size metadata may not yet be
available. AMDGPUCodeGenPrepare runs later in the pipeline, after the
attributor pass, ensuring the metadata is available.
Changes:
- Move visitMbcntLo and visitMbcntHi methods to AMDGPUCodeGenPrepare
- Remove complex mbcnt optimization from AMDGPUInstCombineIntrinsic
- Keep simple wave32 mbcnt_hi -> copy optimization in InstCombine
- Move test files from InstCombine/AMDGPU to Transforms/AMDGPU
- Update test RUN lines to use amdgpu-codegenprepare pass
This fixes the pipeline ordering issue where InstCombine runs before the
AMDGPU Attributor, which prevented the optimization from triggering when the
attributor sets reqd_work_group_size metadata.
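For reference, the lane-id idiom this optimization targets looks like the
following IR (a minimal sketch modeled on the tests in this patch; the
function name and metadata slot are placeholders):

  declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
  declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)

  define i32 @lane_id() !reqd_work_group_size !0 {
    ; mbcnt over an all-ones mask counts the active lanes below the current
    ; lane, i.e. the classic lane-id computation.
    %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
    %id = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
    ret i32 %id
  }
  !0 = !{i32 64, i32 1, i32 1}

When the required X dimension (64 here) matches the wave size, %id can be
replaced with a call to @llvm.amdgcn.workitem.id.x().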
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 147 ++++++++++++++++++
.../Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll | 25 +++
.../AMDGPU/mbcnt-to-bitmask-posit.ll | 26 ++++
.../AMDGPU/mbcnt-to-workitem-neg.ll | 20 +++
.../AMDGPU/mbcnt-to-workitem-posit.ll | 26 ++++
.../AMDGPU/mbcnt-to-workitem-wave32-neg.ll | 66 ++++++++
.../AMDGPU/mbcnt-to-workitem-wave32.ll | 25 +++
7 files changed, 335 insertions(+)
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 24bef82464495..edcb6f6eee21b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -255,6 +255,8 @@ class AMDGPUCodeGenPrepareImpl
bool visitIntrinsicInst(IntrinsicInst &I);
bool visitFMinLike(IntrinsicInst &I);
bool visitSqrt(IntrinsicInst &I);
+ bool visitMbcntLo(IntrinsicInst &I);
+ bool visitMbcntHi(IntrinsicInst &I);
bool run();
};
@@ -1915,6 +1917,10 @@ bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
return visitFMinLike(I);
case Intrinsic::sqrt:
return visitSqrt(I);
+ case Intrinsic::amdgcn_mbcnt_lo:
+ return visitMbcntLo(I);
+ case Intrinsic::amdgcn_mbcnt_hi:
+ return visitMbcntHi(I);
default:
return false;
}
@@ -2113,6 +2119,147 @@ INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
false, false)
+bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
+ // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
+ if (!ST.isWave32())
+ return false;
+
+ // Check for pattern mbcnt.lo(~0, 0)
+ auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+ auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
+ if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
+ return false;
+
+ // Check reqd_work_group_size similar to mbcnt_hi case
+ Function *F = I.getFunction();
+ if (!F)
+ return false;
+
+ unsigned Wave = 0;
+ if (ST.isWaveSizeKnown())
+ Wave = ST.getWavefrontSize();
+
+ if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && XLen == 32)
+ Wave = XLen;
+
+ if (Wave != 0 && XLen == Wave) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ NewCall->takeName(&I);
+ ST.makeLIDRangeMetadata(NewCall);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
+ // exec_hi is all 0, so this is just a copy on wave32.
+ if (ST.isWave32()) {
+ I.replaceAllUsesWith(I.getArgOperand(1));
+ I.eraseFromParent();
+ return true;
+ }
+
+ // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+ auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
+ if (!HiArg1)
+ return false;
+
+ Function *CalledF = HiArg1->getCalledFunction();
+ if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
+ return false;
+
+ // hi arg0 must be all-ones
+ auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+ if (!HiArg0C || !HiArg0C->isAllOnesValue())
+ return false;
+
+ // lo args: arg0 == ~0, arg1 == 0
+ Value *Lo0 = HiArg1->getArgOperand(0);
+ Value *Lo1 = HiArg1->getArgOperand(1);
+ auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
+ auto *Lo1C = dyn_cast<ConstantInt>(Lo1);
+ if (!Lo0C || !Lo1C || !Lo0C->isAllOnesValue() || !Lo1C->isZero())
+ return false;
+
+ // Query reqd_work_group_size via subtarget helper and compare X to wave
+ // size conservatively.
+ Function *F = I.getFunction();
+ if (!F)
+ return false;
+
+ unsigned Wave = 0;
+ if (ST.isWaveSizeKnown())
+ Wave = ST.getWavefrontSize();
+
+ if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && (XLen == 32 || XLen == 64))
+ Wave = XLen; // allow common sizes under test harness
+
+ if (Wave != 0 && XLen == Wave) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ NewCall->takeName(&I);
+ // Attach range metadata when available.
+ ST.makeLIDRangeMetadata(NewCall);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ // Optional: if X dimension evenly splits into wavefronts we can
+ // replace lane-id computation with a bitmask when the wave is a
+ // power-of-two. Use the Subtarget helper to conservatively decide
+ // when per-wave tiling is preserved.
+ if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
+ if (Wave != 0 && isPowerOf2_32(Wave)) {
+ // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
+ IRBuilder<> B(&I);
+ CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ Tid->takeName(&I);
+ IntegerType *ITy = cast<IntegerType>(Tid->getType());
+ Constant *Mask = ConstantInt::get(ITy, Wave - 1);
+ Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
+ AndInst->takeName(&I);
+ // Attach range metadata for the result if possible.
+ ST.makeLIDRangeMetadata(AndInst);
+ I.replaceAllUsesWith(AndInst);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+ } else {
+ // The subtarget helper found no reqd_work_group_size: fall back to reading
+ // the function metadata directly, and only handle the common cases where
+ // the X dimension equals 32/64.
+ if (auto *Node = F->getMetadata("reqd_work_group_size")) {
+ if (Node->getNumOperands() == 3) {
+ unsigned XLen =
+ mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
+ if (XLen == 32 || XLen == 64) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ NewCall->takeName(&I);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
char AMDGPUCodeGenPrepare::ID = 0;
FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
new file mode 100644
index 0000000000000..470751c3c73f3
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
+
+define i32 @test_mbcnt_non_wave_size() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_non_wave_size(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!1 = !{i32 48, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
new file mode 100644
index 0000000000000..97697d4d9651f
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_wave64_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!1 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
new file mode 100644
index 0000000000000..af8d713b798ed
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-LABEL: define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
new file mode 100644
index 0000000000000..91aa942df9337
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!0 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
new file mode 100644
index 0000000000000..403ea7c361250
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when work group size doesn't match wave size
+define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_matching_wgs(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when no reqd_work_group_size is specified
+define i32 @test_mbcnt_lo_wave32_no_wgs() {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_no_wgs(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo with non-all-ones first arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_partial_mask() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_partial_mask(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo with non-zero second arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_non_zero_base() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_zero_base(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+ ret i32 %a
+}
+
+!0 = !{i32 48, i32 1, i32 1} ; Work group size 48 != wave size 32
+!1 = !{i32 32, i32 1, i32 1} ; Work group size 32 == wave size 32
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 32, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
new file mode 100644
index 0000000000000..07a5028ca1ee5
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_lo_wave32() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+!0 = !{i32 32, i32 1, i32 1}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
+;.
>From 9be4aa648b330a8fb0ac3b86cdf6a3b88f384887 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Tue, 30 Sep 2025 11:43:42 +0530
Subject: [PATCH 2/5] [AMDGPU] Enhance mbcnt.lo optimization to support bitmask
cases
Address reviewer feedback by extending visitMbcntLo to handle cases where the
X dimension doesn't exactly match the wave size but still allows an even wave
distribution.
Key improvements:
- mbcnt.lo(~0, 0) -> workitem.id.x() & (wave_size-1) when the X dimension
  lets wavefronts split evenly, as determined by ST.hasWavefrontsEvenlySplittingXDim()
- Handles cases like X=48, X=64, X=128 on wave32 (previously unoptimized)
- Added comprehensive test coverage for bitmask optimization cases
- Updated existing test to reflect new correct behavior
This addresses krzysz00's comment about supporting masking optimizations
for non-exact wave size matches.
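A sketch of the new rewrite (illustrative; the tests added below are the
authoritative form): on a wave32 target with
!reqd_work_group_size !{i32 64, i32 1, i32 1}, the call

  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)

becomes

  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %a = and i32 %tid, 31 ; wave_size - 1

which is valid because each 32-lane wave then covers a contiguous, aligned
slice of the X dimension.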
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 17 +++++++
.../Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll | 44 +++++++++++++++++++
.../AMDGPU/mbcnt-to-workitem-wave32-neg.ll | 5 ++-
3 files changed, 64 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index edcb6f6eee21b..07812549abbdd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2154,6 +2154,23 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
I.eraseFromParent();
return true;
}
+ // Handle bitmask case: when X dimension evenly splits into waves
+ // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1)
+ if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
+ if (Wave != 0 && isPowerOf2_32(Wave)) {
+ IRBuilder<> B(&I);
+ CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ ST.makeLIDRangeMetadata(Tid);
+ IntegerType *ITy = cast<IntegerType>(Tid->getType());
+ Constant *Mask = ConstantInt::get(ITy, Wave - 1);
+ Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
+ AndInst->takeName(&I);
+ // Note: Range metadata cannot be applied to 'and' instructions
+ I.replaceAllUsesWith(AndInst);
+ I.eraseFromParent();
+ return true;
+ }
+ }
}
return false;
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll b/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
new file mode 100644
index 0000000000000..f84217705818e
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-lo-to-bitmask.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+; Test that mbcnt.lo(~0, 0) is optimized to workitem.id.x() & 0x1f on wave32
+; when work group size is multiple of wave size (64 = 2 * 32)
+define i32 @test_mbcnt_lo_wave32_bitmask() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_bitmask(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+; Test with X dimension = 128 (4 * 32 waves)
+define i32 @test_mbcnt_lo_wave32_bitmask_128() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_bitmask_128(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 31
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+!0 = !{i32 64, i32 1, i32 1} ; 64 = 2 * 32 wave size
+!1 = !{i32 128, i32 1, i32 1} ; 128 = 4 * 32 wave size
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 128, i32 1, i32 1}
+;.
\ No newline at end of file
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
index 403ea7c361250..30514562556ea 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
@@ -1,12 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
-; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when work group size doesn't match wave size
+; Test that mbcnt.lo(~0, 0) IS optimized on wave32 with bitmask when work group size allows even wave distribution
define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_matching_wgs(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[TMP0:%.*]] = call {{.*}} i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: [[A:%.*]] = and i32 [[TMP0]], 31
; CHECK-NEXT: ret i32 [[A]]
;
entry:
>From 97acb93fc1efe02f45dc913d8e5f33d6d2d07ee8 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Tue, 30 Sep 2025 14:57:34 +0530
Subject: [PATCH 3/5] Fix mbcnt.hi wave32 optimization to respect
reqd_work_group_size metadata
The wave32 optimization for mbcnt.hi fired unconditionally, which caused
CodeGen test failures because those tests expected to see the mbcnt.hi
instruction in the output.
This change makes the wave32 mbcnt.hi optimization conditional on the same
metadata requirements as the mbcnt.lo optimization, ensuring consistency
and preserving existing test behavior for functions without the appropriate
metadata.
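Sketched in IR (assumed shapes, not taken verbatim from a test): on wave32
with !reqd_work_group_size !{i32 32, i32 1, i32 1},

  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)

still folds to plain %a, since exec_hi is known to be zero, while the same
call in a function without matching metadata is now left untouched.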
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 23 ++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 07812549abbdd..87487310b67d0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2178,10 +2178,27 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
// exec_hi is all 0, so this is just a copy on wave32.
+ // However, only optimize if we have the same conditions as mbcnt.lo.
if (ST.isWave32()) {
- I.replaceAllUsesWith(I.getArgOperand(1));
- I.eraseFromParent();
- return true;
+ Function *F = I.getFunction();
+ if (!F)
+ return false;
+
+ unsigned Wave = 0;
+ if (ST.isWaveSizeKnown())
+ Wave = ST.getWavefrontSize();
+
+ if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && XLen == 32)
+ Wave = XLen;
+
+ if (Wave != 0 && XLen == Wave) {
+ I.replaceAllUsesWith(I.getArgOperand(1));
+ I.eraseFromParent();
+ return true;
+ }
+ }
}
// Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
>From 32d533ee7bfcd8338a79bb867badc1c630d7e9b3 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Fri, 3 Oct 2025 11:04:32 +0530
Subject: [PATCH 4/5] Address review feedback: Add periods to all comments
As requested by @cdevadas, all comments in the mbcnt optimization
functions now end with a period for consistency with LLVM style.
Note: @krzysz00 defended the use of the 'auto' keyword here, since these uses
appear next to dyn_cast<>, which already spells out the type, following
common LLVM patterns.
---
.../lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 87487310b67d0..ae15faeb3aa9e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2120,17 +2120,17 @@ INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
false, false)
bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
- // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
+ // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x.
if (!ST.isWave32())
return false;
- // Check for pattern mbcnt.lo(~0, 0)
+ // Check for pattern mbcnt.lo(~0, 0).
auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
return false;
- // Check reqd_work_group_size similar to mbcnt_hi case
+ // Check reqd_work_group_size similar to mbcnt_hi case.
Function *F = I.getFunction();
if (!F)
return false;
@@ -2154,8 +2154,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
I.eraseFromParent();
return true;
}
- // Handle bitmask case: when X dimension evenly splits into waves
- // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1)
+ // Handle bitmask case: when X dimension evenly splits into waves.
+ // mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1).
if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
if (Wave != 0 && isPowerOf2_32(Wave)) {
IRBuilder<> B(&I);
@@ -2165,7 +2165,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
Constant *Mask = ConstantInt::get(ITy, Wave - 1);
Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
AndInst->takeName(&I);
- // Note: Range metadata cannot be applied to 'and' instructions
+ // Note: Range metadata cannot be applied to 'and' instructions.
I.replaceAllUsesWith(AndInst);
I.eraseFromParent();
return true;
@@ -2201,7 +2201,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
}
}
- // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+ // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0)).
auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
if (!HiArg1)
return false;
@@ -2210,12 +2210,12 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
return false;
- // hi arg0 must be all-ones
+ // hi arg0 must be all-ones.
auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
if (!HiArg0C || !HiArg0C->isAllOnesValue())
return false;
- // lo args: arg0 == ~0, arg1 == 0
+ // lo args: arg0 == ~0, arg1 == 0.
Value *Lo0 = HiArg1->getArgOperand(0);
Value *Lo1 = HiArg1->getArgOperand(1);
auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
>From 8fc6c7e1fa8ca2bc1c94f50a86b4ab2bf4a1bf68 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Fri, 3 Oct 2025 12:46:35 +0530
Subject: [PATCH 5/5] Address review comments from @arsenm
- Make visitMbcnt* functions const
- Use class member F instead of getFunction() calls
- Use auto* for pointer types and remove unnecessary null checks
- Remove range metadata from AND instruction (only apply to intrinsic calls)
- Simplify wave size logic by checking isWaveSizeKnown() upfront
- Use direct metadata access with F.getMetadata() instead of F->getMetadata()
- Add -mcpu=gfx906 to both test RUN lines to enable wave64 detection
- Update CHECK expectations to account for function attributes added
when specifying a CPU target
All changes maintain the same optimization functionality while improving
code quality and following LLVM coding conventions.
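As an illustration of the range-metadata point (a sketch; the exact bounds
come from makeLIDRangeMetadata and depend on the target), the bitmask path
is expected to emit

  %tid = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
  %a = and i32 %tid, 31

with the range attached only to the intrinsic call, since range metadata is
not valid on the 'and' instruction itself.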
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 81 ++++++++-----------
.../AMDGPU/mbcnt-to-bitmask-posit.ll | 4 +-
.../AMDGPU/mbcnt-to-workitem-posit.ll | 4 +-
3 files changed, 37 insertions(+), 52 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index ae15faeb3aa9e..39ae5d38ec70b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2130,21 +2130,16 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
return false;
- // Check reqd_work_group_size similar to mbcnt_hi case.
- Function *F = I.getFunction();
- if (!F)
+ // Abort if wave size is not known.
+ if (!ST.isWaveSizeKnown())
return false;
- unsigned Wave = 0;
- if (ST.isWaveSizeKnown())
- Wave = ST.getWavefrontSize();
+ unsigned Wave = ST.getWavefrontSize();
- if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
unsigned XLen = *MaybeX;
- if (Wave == 0 && XLen == 32)
- Wave = XLen;
- if (Wave != 0 && XLen == Wave) {
+ if (XLen == Wave) {
IRBuilder<> B(&I);
CallInst *NewCall =
B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
@@ -2156,8 +2151,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
}
// Handle bitmask case: when X dimension evenly splits into waves.
// mbcnt.lo(~0, 0) = workitem.id.x() & (wave_size - 1).
- if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
- if (Wave != 0 && isPowerOf2_32(Wave)) {
+ if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
+ if (isPowerOf2_32(Wave)) {
IRBuilder<> B(&I);
CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
ST.makeLIDRangeMetadata(Tid);
@@ -2180,20 +2175,16 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
// exec_hi is all 0, so this is just a copy on wave32.
// However, only optimize if we have the same conditions as mbcnt.lo.
if (ST.isWave32()) {
- Function *F = I.getFunction();
- if (!F)
+ // Abort if wave size is not known.
+ if (!ST.isWaveSizeKnown())
return false;
- unsigned Wave = 0;
- if (ST.isWaveSizeKnown())
- Wave = ST.getWavefrontSize();
+ unsigned Wave = ST.getWavefrontSize();
- if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
unsigned XLen = *MaybeX;
- if (Wave == 0 && XLen == 32)
- Wave = XLen;
- if (Wave != 0 && XLen == Wave) {
+ if (XLen == Wave) {
I.replaceAllUsesWith(I.getArgOperand(1));
I.eraseFromParent();
return true;
@@ -2225,20 +2216,16 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
// Query reqd_work_group_size via subtarget helper and compare X to wave
// size conservatively.
- Function *F = I.getFunction();
- if (!F)
+ // Abort if wave size is not known.
+ if (!ST.isWaveSizeKnown())
return false;
- unsigned Wave = 0;
- if (ST.isWaveSizeKnown())
- Wave = ST.getWavefrontSize();
+ unsigned Wave = ST.getWavefrontSize();
- if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
unsigned XLen = *MaybeX;
- if (Wave == 0 && (XLen == 32 || XLen == 64))
- Wave = XLen; // allow common sizes under test harness
- if (Wave != 0 && XLen == Wave) {
+ if (XLen == Wave) {
IRBuilder<> B(&I);
CallInst *NewCall =
B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
@@ -2253,18 +2240,17 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
// replace lane-id computation with a bitmask when the wave is a
// power-of-two. Use the Subtarget helper to conservatively decide
// when per-wave tiling is preserved.
- if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
- if (Wave != 0 && isPowerOf2_32(Wave)) {
+ if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
+ if (isPowerOf2_32(Wave)) {
// Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
IRBuilder<> B(&I);
CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
- Tid->takeName(&I);
+ ST.makeLIDRangeMetadata(Tid);
IntegerType *ITy = cast<IntegerType>(Tid->getType());
Constant *Mask = ConstantInt::get(ITy, Wave - 1);
Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
AndInst->takeName(&I);
- // Attach range metadata for the result if possible.
- ST.makeLIDRangeMetadata(AndInst);
+ // Note: Range metadata cannot be applied to 'and' instructions.
I.replaceAllUsesWith(AndInst);
I.eraseFromParent();
return true;
@@ -2274,19 +2260,18 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
// The subtarget helper found no reqd_work_group_size: fall back to reading
// the function metadata directly, and only handle the common cases where
// the X dimension equals 32/64.
- if (auto *Node = F->getMetadata("reqd_work_group_size")) {
- if (Node->getNumOperands() == 3) {
- unsigned XLen =
- mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
- if (XLen == 32 || XLen == 64) {
- IRBuilder<> B(&I);
- CallInst *NewCall =
- B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
- NewCall->takeName(&I);
- I.replaceAllUsesWith(NewCall);
- I.eraseFromParent();
- return true;
- }
+ const MDNode *Node = F.getMetadata("reqd_work_group_size");
+ if (Node && Node->getNumOperands() == 3) {
+ unsigned XLen =
+ mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
+ if (XLen == Wave) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ NewCall->takeName(&I);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
}
}
}
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
index 97697d4d9651f..ad3993bbc30c1 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
; CHECK-LABEL: define i32 @test_mbcnt_wave64_to_workitem(
-; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
index 91aa942df9337..fe049e1627409 100644
--- a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx906 -passes=amdgpu-codegenprepare < %s | FileCheck %s
define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
-; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()