[llvm-branch-commits] [llvm] [AMDGPU] Use different name scope for MIMGBaseOpcode (PR #170904)
Mirko Brkušanin via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Dec 8 09:48:48 PST 2025
https://github.com/mbrkusanin updated https://github.com/llvm/llvm-project/pull/170904
From 882c6ec5c217e4a29963ba735a7068b28d92bf96 Mon Sep 17 00:00:00 2001
From: Mirko Brkusanin <Mirko.Brkusanin at amd.com>
Date: Fri, 5 Dec 2025 19:21:27 +0100
Subject: [PATCH 1/2] [AMDGPU] Use different name scope for MIMGBaseOpcode
Define MIMGBaseOpcode values with `enum class` instead of a regular
`enum` so that they live in a separate name scope from regular
instruction opcodes. These two groups of opcodes should not be mixed,
and keeping them in different scopes reduces the chance of
introducing bugs.
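For context, a minimal standalone sketch of what the scoped enum buys
at compile time. The enumerator values and the helper below are
hypothetical stand-ins, not the TableGen-generated definitions:

    // Unscoped enum: enumerators convert implicitly to unsigned.
    enum RegularOpcode { V_ADD_F32 = 0, IMAGE_SAMPLE_V1 = 1 };

    // Scoped enum: no implicit conversion, explicit qualification required.
    enum class MIMGBaseOpcode : unsigned { IMAGE_SAMPLE = 0, IMAGE_LOAD = 1 };

    // Stand-in for a lookup that expects a base opcode (real lookup elided).
    int getMIMGOpcode(MIMGBaseOpcode BaseOpcode) { return 0; }

    void demo() {
      // With the old unscoped enum, a regular opcode silently converted to
      // unsigned and could be passed where a base opcode was expected:
      // getMIMGOpcode(V_ADD_F32);               // would compile before, rejected now
      getMIMGOpcode(MIMGBaseOpcode::IMAGE_SAMPLE); // scope must be spelled out
    }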
---
llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h | 6 ++---
.../AMDGPU/AMDGPUInstructionSelector.cpp | 2 +-
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 19 ++++++++-------
llvm/lib/Target/AMDGPU/MIMGInstructions.td | 3 ++-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 23 ++++++++++---------
.../Target/AMDGPU/SILoadStoreOptimizer.cpp | 4 +++-
.../Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp | 2 +-
llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h | 16 ++++++-------
8 files changed, 41 insertions(+), 34 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index 529da8d28a3c1..328ef3e10f5c9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -49,8 +49,8 @@ const D16ImageDimIntrinsic *lookupD16ImageDimIntrinsic(unsigned Intr);
struct ImageDimIntrinsicInfo {
unsigned Intr;
- unsigned BaseOpcode;
- unsigned AtomicNoRetBaseOpcode;
+ MIMGBaseOpcode BaseOpcode;
+ MIMGBaseOpcode AtomicNoRetBaseOpcode;
MIMGDim Dim;
uint8_t NumOffsetArgs;
@@ -85,7 +85,7 @@ struct ImageDimIntrinsicInfo {
const ImageDimIntrinsicInfo *getImageDimIntrinsicInfo(unsigned Intr);
const ImageDimIntrinsicInfo *
-getImageDimIntrinsicByBaseOpcode(unsigned BaseOpcode, unsigned Dim);
+getImageDimIntrinsicByBaseOpcode(MIMGBaseOpcode BaseOpcode, unsigned Dim);
} // end AMDGPU namespace
} // End llvm namespace
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 15492144ba615..e3d4a063c7d3a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2054,7 +2054,7 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
MachineBasicBlock *MBB = MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
- unsigned IntrOpcode = Intr->BaseOpcode;
+ AMDGPU::MIMGBaseOpcode IntrOpcode = Intr->BaseOpcode;
// For image atomic: use no-return opcode if result is unused.
if (Intr->AtomicNoRetBaseOpcode != Intr->BaseOpcode) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index cb1a4ee6d542e..137aa7d3d18e9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -6850,8 +6850,10 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
}
const bool IsAtomicPacked16Bit =
- (BaseOpcode->BaseOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16 ||
- BaseOpcode->BaseOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16);
+ (BaseOpcode->BaseOpcode ==
+ AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_F16 ||
+ BaseOpcode->BaseOpcode ==
+ AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_BF16);
// Check for 16 bit addresses and pack if true.
LLT GradTy =
@@ -7476,10 +7478,11 @@ bool AMDGPULegalizerInfo::legalizeBVHIntersectRayIntrinsic(
const bool UseNSA =
IsGFX12Plus || (ST.hasNSAEncoding() && NumVAddrs <= ST.getNSAMaxSize());
- const unsigned BaseOpcodes[2][2] = {
- {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16},
- {AMDGPU::IMAGE_BVH64_INTERSECT_RAY,
- AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}};
+ const AMDGPU::MIMGBaseOpcode BaseOpcodes[2][2] = {
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY_a16},
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY_a16}};
int Opcode;
if (UseNSA) {
Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
@@ -7622,8 +7625,8 @@ bool AMDGPULegalizerInfo::legalizeBVHDualOrBVH8IntersectRayIntrinsic(
const unsigned NumVDataDwords = 10;
const unsigned NumVAddrDwords = IsBVH8 ? 11 : 12;
int Opcode = AMDGPU::getMIMGOpcode(
- IsBVH8 ? AMDGPU::IMAGE_BVH8_INTERSECT_RAY
- : AMDGPU::IMAGE_BVH_DUAL_INTERSECT_RAY,
+ IsBVH8 ? AMDGPU::MIMGBaseOpcode::IMAGE_BVH8_INTERSECT_RAY
+ : AMDGPU::MIMGBaseOpcode::IMAGE_BVH_DUAL_INTERSECT_RAY,
AMDGPU::MIMGEncGfx12, NumVDataDwords, NumVAddrDwords);
assert(Opcode != -1);
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index 65dce74a1e894..494c3c07ea88c 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -55,7 +55,7 @@ class MIMGBaseOpcode : PredicateControl {
bit PointSampleAccel = 0; // Opcode eligible for gfx11.5 point sample acceleration
}
-def MIMGBaseOpcode : GenericEnum {
+def MIMGBaseOpcode : GenericEnumClass {
let FilterClass = "MIMGBaseOpcode";
}
@@ -1888,6 +1888,7 @@ def ImageDimIntrinsicTable : GenericTable {
def getImageDimIntrinsicByBaseOpcode : SearchIndex {
let Table = ImageDimIntrinsicTable;
let Key = ["BaseOpcode", "Dim"];
+ string TypeOf_BaseOpcode = "MIMGBaseOpcode";
}
foreach intr = AMDGPUImageDimIntrinsics in {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f206cee0222f9..6a1bc4dd74070 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9252,7 +9252,7 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
SDLoc DL(Op);
MachineFunction &MF = DAG.getMachineFunction();
const GCNSubtarget *ST = &MF.getSubtarget<GCNSubtarget>();
- unsigned IntrOpcode = Intr->BaseOpcode;
+ AMDGPU::MIMGBaseOpcode IntrOpcode = Intr->BaseOpcode;
// For image atomic: use no-return opcode if result is unused.
if (Intr->AtomicNoRetBaseOpcode != Intr->BaseOpcode &&
!Op.getNode()->hasAnyUseOfValue(0))
@@ -9287,10 +9287,10 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
VData = Op.getOperand(2);
IsAtomicPacked16Bit =
- (IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16 ||
- IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16_NORTN ||
- IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16 ||
- IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16_NORTN);
+ (IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_F16 ||
+ IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_F16_NORTN ||
+ IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_BF16 ||
+ IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_BF16_NORTN);
bool Is64Bit = VData.getValueSizeInBits() == 64;
if (BaseOpcode->AtomicX2) {
@@ -10722,8 +10722,8 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
const unsigned NumVDataDwords = 10;
const unsigned NumVAddrDwords = IsBVH8 ? 11 : 12;
int Opcode = AMDGPU::getMIMGOpcode(
- IsBVH8 ? AMDGPU::IMAGE_BVH8_INTERSECT_RAY
- : AMDGPU::IMAGE_BVH_DUAL_INTERSECT_RAY,
+ IsBVH8 ? AMDGPU::MIMGBaseOpcode::IMAGE_BVH8_INTERSECT_RAY
+ : AMDGPU::MIMGBaseOpcode::IMAGE_BVH_DUAL_INTERSECT_RAY,
AMDGPU::MIMGEncGfx12, NumVDataDwords, NumVAddrDwords);
assert(Opcode != -1);
@@ -10774,10 +10774,11 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
const bool UseNSA = (Subtarget->hasNSAEncoding() &&
NumVAddrs <= Subtarget->getNSAMaxSize()) ||
IsGFX12Plus;
- const unsigned BaseOpcodes[2][2] = {
- {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16},
- {AMDGPU::IMAGE_BVH64_INTERSECT_RAY,
- AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}};
+ const AMDGPU::MIMGBaseOpcode BaseOpcodes[2][2] = {
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY_a16},
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY_a16}};
int Opcode;
if (UseNSA) {
Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index fcf91e0cf0a7c..d730b4935e268 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -613,7 +613,9 @@ static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
if (TII.isImage(Opc)) {
const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
assert(Info);
- return Info->BaseOpcode;
+ // FIXME: MIMGInfo.BaseOpcode is a different class of opcodes and can
+ // potentially have overlap with regular opcodes.
+ return (unsigned)Info->BaseOpcode;
}
if (TII.isMTBUF(Opc))
return AMDGPU::getMTBUFBaseOpcode(Opc);
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index c6e061f368aef..4334d586106e7 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -300,7 +300,7 @@ unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
#define GET_WMMAInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
-int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
+int getMIMGOpcode(MIMGBaseOpcode BaseOpcode, unsigned MIMGEncoding,
unsigned VDataDwords, unsigned VAddrDwords) {
const MIMGInfo *Info =
getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 3a352006e006c..1859a9af89eec 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -443,7 +443,7 @@ LLVM_READONLY
const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc);
LLVM_READONLY
-const MIMGBaseOpcodeInfo *getMIMGBaseOpcodeInfo(unsigned BaseOpcode);
+const MIMGBaseOpcodeInfo *getMIMGBaseOpcodeInfo(MIMGBaseOpcode BaseOpcode);
struct MIMGDimInfo {
MIMGDim Dim;
@@ -490,7 +490,7 @@ struct MIMGG16MappingInfo {
};
LLVM_READONLY
-const MIMGLZMappingInfo *getMIMGLZMappingInfo(unsigned L);
+const MIMGLZMappingInfo *getMIMGLZMappingInfo(MIMGBaseOpcode L);
struct WMMAOpcodeMappingInfo {
unsigned Opcode2Addr;
@@ -498,19 +498,19 @@ struct WMMAOpcodeMappingInfo {
};
LLVM_READONLY
-const MIMGMIPMappingInfo *getMIMGMIPMappingInfo(unsigned MIP);
+const MIMGMIPMappingInfo *getMIMGMIPMappingInfo(MIMGBaseOpcode MIP);
LLVM_READONLY
-const MIMGBiasMappingInfo *getMIMGBiasMappingInfo(unsigned Bias);
+const MIMGBiasMappingInfo *getMIMGBiasMappingInfo(MIMGBaseOpcode Bias);
LLVM_READONLY
-const MIMGOffsetMappingInfo *getMIMGOffsetMappingInfo(unsigned Offset);
+const MIMGOffsetMappingInfo *getMIMGOffsetMappingInfo(MIMGBaseOpcode Offset);
LLVM_READONLY
-const MIMGG16MappingInfo *getMIMGG16MappingInfo(unsigned G);
+const MIMGG16MappingInfo *getMIMGG16MappingInfo(MIMGBaseOpcode G);
LLVM_READONLY
-int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
+int getMIMGOpcode(MIMGBaseOpcode BaseOpcode, unsigned MIMGEncoding,
unsigned VDataDwords, unsigned VAddrDwords);
LLVM_READONLY
@@ -523,7 +523,7 @@ unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
struct MIMGInfo {
uint16_t Opcode;
- uint16_t BaseOpcode;
+ MIMGBaseOpcode BaseOpcode;
uint8_t MIMGEncoding;
uint8_t VDataDwords;
uint8_t VAddrDwords;
From b877d83920555e34a5277da465779f7f4bdb3fc5 Mon Sep 17 00:00:00 2001
From: Mirko Brkusanin <Mirko.Brkusanin at amd.com>
Date: Mon, 8 Dec 2025 18:39:09 +0100
Subject: [PATCH 2/2] Reduce size to uint8_t
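Sketch of the intended effect, assuming the generated enum adopts uint8_t
as its underlying type when the GenericEnumClass Size is 8; the field list
mirrors the MIMGInfo struct touched in patch 1:

    #include <cstdint>

    enum class MIMGBaseOpcode : uint8_t { IMAGE_SAMPLE = 0 /* ... */ };

    struct MIMGInfo {
      uint16_t Opcode;
      MIMGBaseOpcode BaseOpcode; // 1 byte with Size = 8, vs. 2 bytes as uint16_t
      uint8_t MIMGEncoding;
      uint8_t VDataDwords;
      uint8_t VAddrDwords;
    };

    static_assert(sizeof(MIMGBaseOpcode) == 1, "backed by uint8_t");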
---
llvm/lib/Target/AMDGPU/MIMGInstructions.td | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index 494c3c07ea88c..e63b6e4fa1743 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -57,6 +57,7 @@ class MIMGBaseOpcode : PredicateControl {
def MIMGBaseOpcode : GenericEnumClass {
let FilterClass = "MIMGBaseOpcode";
+ let Size = 8;
}
def MIMGBaseOpcodesTable : GenericTable {