[llvm] [AMDGPU]: Rewrite mbcnt_lo/mbcnt_hi to work item ID where applicable (PR #160496)
Teja Alaghari via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 29 02:22:57 PDT 2025
https://github.com/TejaX-Alaghari updated https://github.com/llvm/llvm-project/pull/160496
From e89e1d78d28c2c6260aedd5c07b4b16c157c40c3 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 29 Sep 2025 13:48:07 +0530
Subject: [PATCH] [AMDGPU] Move mbcnt optimization from InstCombine to
AMDGPUCodeGenPrepare
This addresses reviewer concerns about pipeline ordering by moving the mbcnt
optimization from InstCombine to AMDGPUCodeGenPrepare. The InstCombine pass
runs before the AMDGPU Attributor, so reqd_work_group_size metadata may not
be available yet. AMDGPUCodeGenPrepare runs later in the pipeline, after the
attributor pass, ensuring the metadata is available when the optimization runs.
Changes:
- Move visitMbcntLo and visitMbcntHi methods to AMDGPUCodeGenPrepare
- Remove complex mbcnt optimization from AMDGPUInstCombineIntrinsic
- Keep simple wave32 mbcnt_hi -> copy optimization in InstCombine
- Move test files from InstCombine/AMDGPU to Transforms/AMDGPU
- Update test RUN lines to use amdgpu-codegenprepare pass
This fixes the pipeline ordering issue where InstCombine ran before the
AMDGPU Attributor, so the optimization could not trigger when
reqd_work_group_size metadata is set by the attributor.
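For reference, a minimal IR sketch of the rewrite, mirroring the new wave64
positive test mbcnt-to-workitem-posit.ll (the mbcnt.lo feeding the rewritten
mbcnt.hi becomes dead and is left for later cleanup):

  ; before (reqd_work_group_size X dimension == wave size 64)
  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
  ret i32 %b

  ; after amdgpu-codegenprepare
  %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %b = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
  ret i32 %b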
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 266 +++++++++++++-----
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 21 +-
.../Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll | 25 ++
.../AMDGPU/mbcnt-to-bitmask-posit.ll | 26 ++
.../AMDGPU/mbcnt-to-workitem-neg.ll | 20 ++
.../AMDGPU/mbcnt-to-workitem-posit.ll | 26 ++
.../AMDGPU/mbcnt-to-workitem-wave32-neg.ll | 66 +++++
.../AMDGPU/mbcnt-to-workitem-wave32.ll | 25 ++
8 files changed, 402 insertions(+), 73 deletions(-)
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
create mode 100644 llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 24bef82464495..bfb3b0f66a293 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -42,10 +42,10 @@ using namespace llvm::PatternMatch;
namespace {
static cl::opt<bool> WidenLoads(
- "amdgpu-codegenprepare-widen-constant-loads",
- cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+ "amdgpu-codegenprepare-widen-constant-loads",
+ cl::desc(
+ "Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
static cl::opt<bool>
BreakLargePHIs("amdgpu-codegenprepare-break-large-phis",
@@ -64,32 +64,29 @@ static cl::opt<unsigned> BreakLargePHIsThreshold(
cl::ReallyHidden, cl::init(32));
static cl::opt<bool> UseMul24Intrin(
- "amdgpu-codegenprepare-mul24",
- cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(true));
+ "amdgpu-codegenprepare-mul24",
+ cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(true));
// Legalize 64-bit division by using the generic IR expansion.
-static cl::opt<bool> ExpandDiv64InIR(
- "amdgpu-codegenprepare-expand-div64",
- cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+static cl::opt<bool>
+ ExpandDiv64InIR("amdgpu-codegenprepare-expand-div64",
+ cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
- "amdgpu-codegenprepare-disable-idiv-expansion",
- cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+ "amdgpu-codegenprepare-disable-idiv-expansion",
+ cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
// Disable processing of fdiv so we can better test the backend implementations.
static cl::opt<bool> DisableFDivExpand(
- "amdgpu-codegenprepare-disable-fdiv-expansion",
- cl::desc("Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+ "amdgpu-codegenprepare-disable-fdiv-expansion",
+ cl::desc(
+ "Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
class AMDGPUCodeGenPrepareImpl
: public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
@@ -180,26 +177,25 @@ class AMDGPUCodeGenPrepareImpl
/// we expand some divisions here, we need to perform this before obscuring.
bool foldBinOpIntoSelect(BinaryOperator &I) const;
- bool divHasSpecialOptimization(BinaryOperator &I,
- Value *Num, Value *Den) const;
+ bool divHasSpecialOptimization(BinaryOperator &I, Value *Num,
+ Value *Den) const;
unsigned getDivNumBits(BinaryOperator &I, Value *Num, Value *Den,
unsigned MaxDivBits, bool Signed) const;
/// Expands 24 bit div or rem.
- Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den,
- bool IsDiv, bool IsSigned) const;
+ Value *expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den, bool IsDiv, bool IsSigned) const;
- Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den, unsigned NumBits,
- bool IsDiv, bool IsSigned) const;
+ Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den, unsigned NumBits, bool IsDiv,
+ bool IsSigned) const;
/// Expands 32 bit div or rem.
- Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den) const;
+ Value *expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den) const;
- Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den) const;
+ Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den) const;
void expandDivRem64(BinaryOperator &I) const;
/// Widen a scalar load.
@@ -255,6 +251,8 @@ class AMDGPUCodeGenPrepareImpl
bool visitIntrinsicInst(IntrinsicInst &I);
bool visitFMinLike(IntrinsicInst &I);
bool visitSqrt(IntrinsicInst &I);
+ bool visitMbcntLo(IntrinsicInst &I);
+ bool visitMbcntHi(IntrinsicInst &I);
bool run();
};
@@ -308,7 +306,8 @@ bool AMDGPUCodeGenPrepareImpl::run() {
bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const {
return I.getOpcode() == Instruction::AShr ||
- I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
+ I.getOpcode() == Instruction::SDiv ||
+ I.getOpcode() == Instruction::SRem;
}
bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const {
@@ -349,8 +348,7 @@ static void extractValues(IRBuilder<> &Builder,
Values.push_back(Builder.CreateExtractElement(V, I));
}
-static Value *insertValues(IRBuilder<> &Builder,
- Type *Ty,
+static Value *insertValues(IRBuilder<> &Builder, Type *Ty,
SmallVectorImpl<Value *> &Values) {
if (!Ty->isVectorTy()) {
assert(Values.size() == 1);
@@ -492,8 +490,8 @@ bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
Builder.setFastMathFlags(FPOp->getFastMathFlags());
- Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
- FoldedT, FoldedF);
+ Value *NewSelect =
+ Builder.CreateSelect(Sel->getCondition(), FoldedT, FoldedF);
NewSelect->takeName(&BO);
BO.replaceAllUsesWith(NewSelect);
BO.eraseFromParent();
@@ -901,8 +899,8 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
return true;
}
-static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
- Value *LHS, Value *RHS) {
+static std::pair<Value *, Value *> getMul64(IRBuilder<> &Builder, Value *LHS,
+ Value *RHS) {
Type *I32Ty = Builder.getInt32Ty();
Type *I64Ty = Builder.getInt64Ty();
@@ -915,7 +913,7 @@ static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
return std::pair(Lo, Hi);
}
-static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
+static Value *getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
return getMul64(Builder, LHS, RHS).second;
}
@@ -1009,8 +1007,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
: Builder.CreateUIToFP(IA, F32Ty);
// float fb = (float)ib;
- Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
- : Builder.CreateUIToFP(IB,F32Ty);
+ Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
+ : Builder.CreateUIToFP(IB, F32Ty);
Value *RCP = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp,
Builder.getFloatTy(), {FB});
@@ -1027,8 +1025,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
auto FMAD = !ST.hasMadMacF32Insts()
? Intrinsic::fma
: (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
- Value *FR = Builder.CreateIntrinsic(FMAD,
- {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);
+ Value *FR =
+ Builder.CreateIntrinsic(FMAD, {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);
// int iq = (int)fq;
Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
@@ -1064,8 +1062,7 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
Res = Builder.CreateShl(Res, InRegBits);
Res = Builder.CreateAShr(Res, InRegBits);
} else {
- ConstantInt *TruncMask
- = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
+ ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
Res = Builder.CreateAnd(Res, TruncMask);
}
}
@@ -1131,7 +1128,7 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
Builder.setFastMathFlags(FMF);
if (divHasSpecialOptimization(I, X, Y))
- return nullptr; // Keep it for later optimization.
+ return nullptr; // Keep it for later optimization.
bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
@@ -1151,8 +1148,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
}
if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
- return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
- Builder.CreateZExtOrTrunc(Res, Ty);
+ return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty)
+ : Builder.CreateZExtOrTrunc(Res, Ty);
}
ConstantInt *Zero = Builder.getInt32(0);
@@ -1247,7 +1244,7 @@ Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
BinaryOperator &I, Value *Num,
Value *Den) const {
if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
- return nullptr; // Keep it for later optimization.
+ return nullptr; // Keep it for later optimization.
Instruction::BinaryOps Opc = I.getOpcode();
@@ -1260,15 +1257,15 @@ Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
Value *Narrowed = nullptr;
if (NumDivBits <= 24) {
- Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
- IsDiv, IsSigned);
+ Narrowed =
+ expandDivRem24Impl(Builder, I, Num, Den, NumDivBits, IsDiv, IsSigned);
} else if (NumDivBits <= 32) {
Narrowed = expandDivRem32(Builder, I, Num, Den);
}
if (Narrowed) {
- return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
- Builder.CreateZExt(Narrowed, Num->getType());
+ return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType())
+ : Builder.CreateZExt(Narrowed, Num->getType());
}
return nullptr;
@@ -1384,8 +1381,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
- ScalarSize <= 64 &&
- !DisableIDivExpand) {
+ ScalarSize <= 64 && !DisableIDivExpand) {
Value *Num = I.getOperand(0);
Value *Den = I.getOperand(1);
IRBuilder<> Builder(&I);
@@ -1470,17 +1466,16 @@ bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
// If we have range metadata, we need to convert the type, and not make
// assumptions about the high bits.
if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
- ConstantInt *Lower =
- mdconst::extract<ConstantInt>(Range->getOperand(0));
+ ConstantInt *Lower = mdconst::extract<ConstantInt>(Range->getOperand(0));
if (Lower->isNullValue()) {
WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
} else {
Metadata *LowAndHigh[] = {
- ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
- // Don't make assumptions about the high bits.
- ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
- };
+ ConstantAsMetadata::get(
+ ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
+ // Don't make assumptions about the high bits.
+ ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))};
WidenLoad->setMetadata(LLVMContext::MD_range,
MDNode::get(F.getContext(), LowAndHigh));
@@ -1915,6 +1910,10 @@ bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
return visitFMinLike(I);
case Intrinsic::sqrt:
return visitSqrt(I);
+ case Intrinsic::amdgcn_mbcnt_lo:
+ return visitMbcntLo(I);
+ case Intrinsic::amdgcn_mbcnt_hi:
+ return visitMbcntHi(I);
default:
return false;
}
@@ -2113,6 +2112,147 @@ INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
false, false)
+bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
+ // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
+ if (!ST.isWave32())
+ return false;
+
+ // Check for pattern mbcnt.lo(~0, 0)
+ auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+ auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
+ if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
+ return false;
+
+ // Check reqd_work_group_size similar to mbcnt_hi case
+ Function *F = I.getFunction();
+ if (!F)
+ return false;
+
+ unsigned Wave = 0;
+ if (ST.isWaveSizeKnown())
+ Wave = ST.getWavefrontSize();
+
+ if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && XLen == 32)
+ Wave = XLen;
+
+ if (Wave != 0 && XLen == Wave) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ NewCall->takeName(&I);
+ ST.makeLIDRangeMetadata(NewCall);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
+ // exec_hi is all 0, so this is just a copy on wave32.
+ if (ST.isWave32()) {
+ I.replaceAllUsesWith(I.getArgOperand(1));
+ I.eraseFromParent();
+ return true;
+ }
+
+ // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+ auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
+ if (!HiArg1)
+ return false;
+
+ Function *CalledF = HiArg1->getCalledFunction();
+ if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
+ return false;
+
+ // hi arg0 must be all-ones
+ auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+ if (!HiArg0C || !HiArg0C->isAllOnesValue())
+ return false;
+
+ // lo args: arg0 == ~0, arg1 == 0
+ Value *Lo0 = HiArg1->getArgOperand(0);
+ Value *Lo1 = HiArg1->getArgOperand(1);
+ auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
+ auto *Lo1C = dyn_cast<ConstantInt>(Lo1);
+ if (!Lo0C || !Lo1C || !Lo0C->isAllOnesValue() || !Lo1C->isZero())
+ return false;
+
+ // Query reqd_work_group_size via subtarget helper and compare X to wave
+ // size conservatively.
+ Function *F = I.getFunction();
+ if (!F)
+ return false;
+
+ unsigned Wave = 0;
+ if (ST.isWaveSizeKnown())
+ Wave = ST.getWavefrontSize();
+
+ if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && (XLen == 32 || XLen == 64))
+ Wave = XLen; // allow common sizes under test harness
+
+ if (Wave != 0 && XLen == Wave) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ NewCall->takeName(&I);
+ // Attach range metadata when available.
+ ST.makeLIDRangeMetadata(NewCall);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ // Optional: if X dimension evenly splits into wavefronts we can
+ // replace lane-id computation with a bitmask when the wave is a
+ // power-of-two. Use the Subtarget helper to conservatively decide
+ // when per-wave tiling is preserved.
+ if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
+ if (Wave != 0 && isPowerOf2_32(Wave)) {
+ // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
+ IRBuilder<> B(&I);
+ CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ Tid->takeName(&I);
+ IntegerType *ITy = cast<IntegerType>(Tid->getType());
+ Constant *Mask = ConstantInt::get(ITy, Wave - 1);
+ Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
+ AndInst->takeName(&I);
+ // Attach range metadata for the result if possible.
+ ST.makeLIDRangeMetadata(AndInst);
+ I.replaceAllUsesWith(AndInst);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+ } else {
+ // getReqdWorkGroupSize() returned nothing: fall back to reading the raw
+ // reqd_work_group_size metadata directly and conservatively handle only
+ // the common wave sizes 32 and 64.
+ if (auto *Node = F->getMetadata("reqd_work_group_size")) {
+ if (Node->getNumOperands() == 3) {
+ unsigned XLen =
+ mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
+ if (XLen == 32 || XLen == 64) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ NewCall->takeName(&I);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
char AMDGPUCodeGenPrepare::ID = 0;
FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 4fe5d00679436..1481193d937c0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -674,7 +674,8 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
auto IID = SrcCI->getIntrinsicID();
- // llvm.amdgcn.rcp(llvm.amdgcn.sqrt(x)) -> llvm.amdgcn.rsq(x) if contractable
+ // llvm.amdgcn.rcp(llvm.amdgcn.sqrt(x)) -> llvm.amdgcn.rsq(x) if
+ // contractable
//
// llvm.amdgcn.rcp(llvm.sqrt(x)) -> llvm.amdgcn.rsq(x) if contractable and
// relaxed.
@@ -885,13 +886,13 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
}
case Intrinsic::amdgcn_cvt_off_f32_i4: {
- Value* Arg = II.getArgOperand(0);
+ Value *Arg = II.getArgOperand(0);
Type *Ty = II.getType();
if (isa<PoisonValue>(Arg))
return IC.replaceInstUsesWith(II, PoisonValue::get(Ty));
- if(IC.getSimplifyQuery().isUndefValue(Arg))
+ if (IC.getSimplifyQuery().isUndefValue(Arg))
return IC.replaceInstUsesWith(II, Constant::getNullValue(Ty));
ConstantInt *CArg = dyn_cast<ConstantInt>(II.getArgOperand(0));
@@ -1312,8 +1313,8 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
}
case Intrinsic::amdgcn_mbcnt_hi: {
- // exec_hi is all 0, so this is just a copy.
- if (ST->isWave32())
+ // exec_hi is all 0, so this is just a copy on wave32.
+ if (ST && ST->isWave32())
return IC.replaceInstUsesWith(II, II.getArgOperand(1));
break;
}
@@ -1739,7 +1740,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
}
}
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
- AMDGPU::getImageDimIntrinsicInfo(II.getIntrinsicID())) {
+ AMDGPU::getImageDimIntrinsicInfo(II.getIntrinsicID())) {
return simplifyAMDGCNImageIntrinsic(ST, ImageDimIntr, II, IC);
}
return std::nullopt;
@@ -1747,10 +1748,10 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
/// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
///
-/// The result of simplifying amdgcn image and buffer store intrinsics is updating
-/// definitions of the intrinsics vector argument, not Uses of the result like
-/// image and buffer loads.
-/// Note: This only supports non-TFE/LWE image intrinsic calls; those have
+/// The result of simplifying amdgcn image and buffer store intrinsics is
+/// updating definitions of the intrinsics vector argument, not Uses of the
+/// result like image and buffer loads. Note: This only supports non-TFE/LWE
+/// image intrinsic calls; those have
/// struct returns.
static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
IntrinsicInst &II,
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
new file mode 100644
index 0000000000000..470751c3c73f3
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_non_wave_size() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_non_wave_size(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!1 = !{i32 48, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
new file mode 100644
index 0000000000000..97697d4d9651f
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_wave64_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!1 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
new file mode 100644
index 0000000000000..af8d713b798ed
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-LABEL: define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
new file mode 100644
index 0000000000000..91aa942df9337
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[B]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!0 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
new file mode 100644
index 0000000000000..403ea7c361250
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when work group size doesn't match wave size
+define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_matching_wgs(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when no reqd_work_group_size is specified
+define i32 @test_mbcnt_lo_wave32_no_wgs() {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_no_wgs(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo with non-all-ones first arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_partial_mask() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_partial_mask(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo with non-zero second arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_non_zero_base() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_zero_base(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+ ret i32 %a
+}
+
+!0 = !{i32 48, i32 1, i32 1} ; Work group size 48 != wave size 32
+!1 = !{i32 32, i32 1, i32 1} ; Work group size 32 == wave size 32
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 32, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
new file mode 100644
index 0000000000000..07a5028ca1ee5
--- /dev/null
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
+
+define i32 @test_mbcnt_lo_wave32() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+!0 = !{i32 32, i32 1, i32 1}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
+;.