[llvm-branch-commits] [llvm] [AMDGPU] Use different name scope for MIMGBaseOpcode (PR #170904)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Dec 5 10:50:25 PST 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-amdgpu
Author: Mirko Brkušanin (mbrkusanin)
<details>
<summary>Changes</summary>
Define MIMGBaseOpcode values with `enum class` instead of a regular
`enum` so that they are placed in a separate name scope from regular
opcodes. These two groups of opcodes should not be mixed, and
keeping them in different scopes will reduce the chance of
introducing bugs.
---
Full diff: https://github.com/llvm/llvm-project/pull/170904.diff
8 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h (+3-3)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp (+1-1)
- (modified) llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp (+11-8)
- (modified) llvm/lib/Target/AMDGPU/MIMGInstructions.td (+2-1)
- (modified) llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+12-11)
- (modified) llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp (+3-1)
- (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp (+1-1)
- (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h (+8-8)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index 529da8d28a3c1..328ef3e10f5c9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -49,8 +49,8 @@ const D16ImageDimIntrinsic *lookupD16ImageDimIntrinsic(unsigned Intr);
struct ImageDimIntrinsicInfo {
unsigned Intr;
- unsigned BaseOpcode;
- unsigned AtomicNoRetBaseOpcode;
+ MIMGBaseOpcode BaseOpcode;
+ MIMGBaseOpcode AtomicNoRetBaseOpcode;
MIMGDim Dim;
uint8_t NumOffsetArgs;
@@ -85,7 +85,7 @@ struct ImageDimIntrinsicInfo {
const ImageDimIntrinsicInfo *getImageDimIntrinsicInfo(unsigned Intr);
const ImageDimIntrinsicInfo *
-getImageDimIntrinsicByBaseOpcode(unsigned BaseOpcode, unsigned Dim);
+getImageDimIntrinsicByBaseOpcode(MIMGBaseOpcode BaseOpcode, unsigned Dim);
} // end AMDGPU namespace
} // End llvm namespace
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 15492144ba615..e3d4a063c7d3a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2054,7 +2054,7 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
MachineBasicBlock *MBB = MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
- unsigned IntrOpcode = Intr->BaseOpcode;
+ AMDGPU::MIMGBaseOpcode IntrOpcode = Intr->BaseOpcode;
// For image atomic: use no-return opcode if result is unused.
if (Intr->AtomicNoRetBaseOpcode != Intr->BaseOpcode) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 6e3a1b6a5563f..f171b217f0260 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -6781,8 +6781,10 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
}
const bool IsAtomicPacked16Bit =
- (BaseOpcode->BaseOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16 ||
- BaseOpcode->BaseOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16);
+ (BaseOpcode->BaseOpcode ==
+ AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_F16 ||
+ BaseOpcode->BaseOpcode ==
+ AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_BF16);
// Check for 16 bit addresses and pack if true.
LLT GradTy =
@@ -7407,10 +7409,11 @@ bool AMDGPULegalizerInfo::legalizeBVHIntersectRayIntrinsic(
const bool UseNSA =
IsGFX12Plus || (ST.hasNSAEncoding() && NumVAddrs <= ST.getNSAMaxSize());
- const unsigned BaseOpcodes[2][2] = {
- {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16},
- {AMDGPU::IMAGE_BVH64_INTERSECT_RAY,
- AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}};
+ const AMDGPU::MIMGBaseOpcode BaseOpcodes[2][2] = {
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY_a16},
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY_a16}};
int Opcode;
if (UseNSA) {
Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
@@ -7553,8 +7556,8 @@ bool AMDGPULegalizerInfo::legalizeBVHDualOrBVH8IntersectRayIntrinsic(
const unsigned NumVDataDwords = 10;
const unsigned NumVAddrDwords = IsBVH8 ? 11 : 12;
int Opcode = AMDGPU::getMIMGOpcode(
- IsBVH8 ? AMDGPU::IMAGE_BVH8_INTERSECT_RAY
- : AMDGPU::IMAGE_BVH_DUAL_INTERSECT_RAY,
+ IsBVH8 ? AMDGPU::MIMGBaseOpcode::IMAGE_BVH8_INTERSECT_RAY
+ : AMDGPU::MIMGBaseOpcode::IMAGE_BVH_DUAL_INTERSECT_RAY,
AMDGPU::MIMGEncGfx12, NumVDataDwords, NumVAddrDwords);
assert(Opcode != -1);
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index 65dce74a1e894..494c3c07ea88c 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -55,7 +55,7 @@ class MIMGBaseOpcode : PredicateControl {
bit PointSampleAccel = 0; // Opcode eligible for gfx11.5 point sample acceleration
}
-def MIMGBaseOpcode : GenericEnum {
+def MIMGBaseOpcode : GenericEnumClass {
let FilterClass = "MIMGBaseOpcode";
}
@@ -1888,6 +1888,7 @@ def ImageDimIntrinsicTable : GenericTable {
def getImageDimIntrinsicByBaseOpcode : SearchIndex {
let Table = ImageDimIntrinsicTable;
let Key = ["BaseOpcode", "Dim"];
+ string TypeOf_BaseOpcode = "MIMGBaseOpcode";
}
foreach intr = AMDGPUImageDimIntrinsics in {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ff67fd63ea75e..6319cf3d6474e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9238,7 +9238,7 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
SDLoc DL(Op);
MachineFunction &MF = DAG.getMachineFunction();
const GCNSubtarget *ST = &MF.getSubtarget<GCNSubtarget>();
- unsigned IntrOpcode = Intr->BaseOpcode;
+ AMDGPU::MIMGBaseOpcode IntrOpcode = Intr->BaseOpcode;
// For image atomic: use no-return opcode if result is unused.
if (Intr->AtomicNoRetBaseOpcode != Intr->BaseOpcode &&
!Op.getNode()->hasAnyUseOfValue(0))
@@ -9273,10 +9273,10 @@ SDValue SITargetLowering::lowerImage(SDValue Op,
VData = Op.getOperand(2);
IsAtomicPacked16Bit =
- (IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16 ||
- IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16_NORTN ||
- IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16 ||
- IntrOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_BF16_NORTN);
+ (IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_F16 ||
+ IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_F16_NORTN ||
+ IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_BF16 ||
+ IntrOpcode == AMDGPU::MIMGBaseOpcode::IMAGE_ATOMIC_PK_ADD_BF16_NORTN);
bool Is64Bit = VData.getValueSizeInBits() == 64;
if (BaseOpcode->AtomicX2) {
@@ -10708,8 +10708,8 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
const unsigned NumVDataDwords = 10;
const unsigned NumVAddrDwords = IsBVH8 ? 11 : 12;
int Opcode = AMDGPU::getMIMGOpcode(
- IsBVH8 ? AMDGPU::IMAGE_BVH8_INTERSECT_RAY
- : AMDGPU::IMAGE_BVH_DUAL_INTERSECT_RAY,
+ IsBVH8 ? AMDGPU::MIMGBaseOpcode::IMAGE_BVH8_INTERSECT_RAY
+ : AMDGPU::MIMGBaseOpcode::IMAGE_BVH_DUAL_INTERSECT_RAY,
AMDGPU::MIMGEncGfx12, NumVDataDwords, NumVAddrDwords);
assert(Opcode != -1);
@@ -10760,10 +10760,11 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
const bool UseNSA = (Subtarget->hasNSAEncoding() &&
NumVAddrs <= Subtarget->getNSAMaxSize()) ||
IsGFX12Plus;
- const unsigned BaseOpcodes[2][2] = {
- {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16},
- {AMDGPU::IMAGE_BVH64_INTERSECT_RAY,
- AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}};
+ const AMDGPU::MIMGBaseOpcode BaseOpcodes[2][2] = {
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH_INTERSECT_RAY_a16},
+ {AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY,
+ AMDGPU::MIMGBaseOpcode::IMAGE_BVH64_INTERSECT_RAY_a16}};
int Opcode;
if (UseNSA) {
Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16],
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index fcf91e0cf0a7c..d730b4935e268 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -613,7 +613,9 @@ static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
if (TII.isImage(Opc)) {
const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
assert(Info);
- return Info->BaseOpcode;
+ // FIXME: MIMGInfo.BaseOpcode is a different class of opcodes and can
+ // potentially have overlap with regular opcodes.
+ return (unsigned)Info->BaseOpcode;
}
if (TII.isMTBUF(Opc))
return AMDGPU::getMTBUFBaseOpcode(Opc);
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index c6e061f368aef..4334d586106e7 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -300,7 +300,7 @@ unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
#define GET_WMMAInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"
-int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
+int getMIMGOpcode(MIMGBaseOpcode BaseOpcode, unsigned MIMGEncoding,
unsigned VDataDwords, unsigned VAddrDwords) {
const MIMGInfo *Info =
getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 3a352006e006c..1859a9af89eec 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -443,7 +443,7 @@ LLVM_READONLY
const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc);
LLVM_READONLY
-const MIMGBaseOpcodeInfo *getMIMGBaseOpcodeInfo(unsigned BaseOpcode);
+const MIMGBaseOpcodeInfo *getMIMGBaseOpcodeInfo(MIMGBaseOpcode BaseOpcode);
struct MIMGDimInfo {
MIMGDim Dim;
@@ -490,7 +490,7 @@ struct MIMGG16MappingInfo {
};
LLVM_READONLY
-const MIMGLZMappingInfo *getMIMGLZMappingInfo(unsigned L);
+const MIMGLZMappingInfo *getMIMGLZMappingInfo(MIMGBaseOpcode L);
struct WMMAOpcodeMappingInfo {
unsigned Opcode2Addr;
@@ -498,19 +498,19 @@ struct WMMAOpcodeMappingInfo {
};
LLVM_READONLY
-const MIMGMIPMappingInfo *getMIMGMIPMappingInfo(unsigned MIP);
+const MIMGMIPMappingInfo *getMIMGMIPMappingInfo(MIMGBaseOpcode MIP);
LLVM_READONLY
-const MIMGBiasMappingInfo *getMIMGBiasMappingInfo(unsigned Bias);
+const MIMGBiasMappingInfo *getMIMGBiasMappingInfo(MIMGBaseOpcode Bias);
LLVM_READONLY
-const MIMGOffsetMappingInfo *getMIMGOffsetMappingInfo(unsigned Offset);
+const MIMGOffsetMappingInfo *getMIMGOffsetMappingInfo(MIMGBaseOpcode Offset);
LLVM_READONLY
-const MIMGG16MappingInfo *getMIMGG16MappingInfo(unsigned G);
+const MIMGG16MappingInfo *getMIMGG16MappingInfo(MIMGBaseOpcode G);
LLVM_READONLY
-int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
+int getMIMGOpcode(MIMGBaseOpcode BaseOpcode, unsigned MIMGEncoding,
unsigned VDataDwords, unsigned VAddrDwords);
LLVM_READONLY
@@ -523,7 +523,7 @@ unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
struct MIMGInfo {
uint16_t Opcode;
- uint16_t BaseOpcode;
+ MIMGBaseOpcode BaseOpcode;
uint8_t MIMGEncoding;
uint8_t VDataDwords;
uint8_t VAddrDwords;
``````````
</details>
https://github.com/llvm/llvm-project/pull/170904
More information about the llvm-branch-commits
mailing list