[llvm] [TableGen] Emit OpName as an enum class instead of a namespace (PR #125313)

Rahul Joshi via llvm-commits llvm-commits at lists.llvm.org
Sat Feb 1 07:10:30 PST 2025


https://github.com/jurahul updated https://github.com/llvm/llvm-project/pull/125313

From c2a8901dee16e18e14012ff2da21f28e3798d140 Mon Sep 17 00:00:00 2001
From: Rahul Joshi <rjoshi at nvidia.com>
Date: Fri, 31 Jan 2025 13:53:21 -0800
Subject: [PATCH] [TableGen] Emit OpName as an enum class instead of a
 namespace

- Change InstrInfoEmitter to emit OpName as an enum class instead of an
  anonymous enum in the OpName namespace.
- This will help clearly distinguish between values that are OpNames and
  values that are just operand indices, and should help avoid bugs caused
  by confusing the two (a rough before/after sketch is included below).
- Also updated AMDGPU, RISCV, and WebAssembly backends to conform to
  the new definition of OpName (mostly mechanical changes).
---
 llvm/docs/WritingAnLLVMBackend.rst            |   6 +-
 .../AMDGPU/AsmParser/AMDGPUAsmParser.cpp      |  56 ++++----
 .../Disassembler/AMDGPUDisassembler.cpp       |  40 +++---
 llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp      |   6 +-
 .../lib/Target/AMDGPU/GCNHazardRecognizer.cpp |   2 +-
 .../AMDGPU/MCA/AMDGPUCustomBehaviour.cpp      |   2 +-
 .../Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h |   3 +-
 .../AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp |   4 +-
 .../MCTargetDesc/AMDGPUMCCodeEmitter.cpp      |   9 +-
 .../AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h  |   1 -
 .../AMDGPU/MCTargetDesc/R600MCTargetDesc.h    |   1 -
 .../Target/AMDGPU/R600ExpandSpecialInstrs.cpp |   5 +-
 llvm/lib/Target/AMDGPU/R600InstrInfo.cpp      | 125 +++++++++---------
 llvm/lib/Target/AMDGPU/R600InstrInfo.h        |  17 +--
 llvm/lib/Target/AMDGPU/R600Packetizer.cpp     |   9 +-
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     |  12 +-
 llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp   |   2 +-
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp        |  40 +++---
 llvm/lib/Target/AMDGPU/SIInstrInfo.h          |  23 ++--
 .../Target/AMDGPU/SILoadStoreOptimizer.cpp    |  20 +--
 .../Target/AMDGPU/SIOptimizeExecMasking.cpp   |   2 +-
 llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h |   9 +-
 llvm/lib/Target/RISCV/RISCVInstrInfo.h        |   2 +-
 .../Target/WebAssembly/WebAssemblyInstrInfo.h |   3 +-
 llvm/utils/TableGen/InstrInfoEmitter.cpp      |  15 ++-
 25 files changed, 207 insertions(+), 207 deletions(-)

diff --git a/llvm/docs/WritingAnLLVMBackend.rst b/llvm/docs/WritingAnLLVMBackend.rst
index 1b9173b1fe139c..d8a8ebf90dbc7d 100644
--- a/llvm/docs/WritingAnLLVMBackend.rst
+++ b/llvm/docs/WritingAnLLVMBackend.rst
@@ -954,8 +954,8 @@ Instruction Operand Name Mapping
 TableGen will also generate a function called getNamedOperandIdx() which
 can be used to look up an operand's index in a MachineInstr based on its
 TableGen name.  Setting the UseNamedOperandTable bit in an instruction's
-TableGen definition will add all of its operands to an enumeration in the
-llvm::XXX:OpName namespace and also add an entry for it into the OperandMap
+TableGen definition will add all of its operands to an enumeration
+llvm::XXX::OpName and also add an entry for it into the OperandMap
 table, which can be queried using getNamedOperandIdx()
 
 .. code-block:: text
@@ -989,7 +989,7 @@ XXXInstrInfo.h:
   #include "XXXGenInstrInfo.inc"
 
   namespace XXX {
-    int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
+    int16_t getNamedOperandIdx(uint16_t Opcode, OpName Name);
   } // End namespace XXX
 
 Instruction Operand Types
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index ed922245b3e2ba..198741310a0f43 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1777,7 +1777,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
   bool validateMIMGDim(const MCInst &Inst, const OperandVector &Operands);
   bool validateMIMGMSAA(const MCInst &Inst);
   bool validateOpSel(const MCInst &Inst);
-  bool validateNeg(const MCInst &Inst, int OpName);
+  bool validateNeg(const MCInst &Inst, AMDGPU::OpName OpName);
   bool validateDPP(const MCInst &Inst, const OperandVector &Operands);
   bool validateVccOperand(MCRegister Reg) const;
   bool validateVOPLiteral(const MCInst &Inst, const OperandVector &Operands);
@@ -3953,8 +3953,9 @@ bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst,
   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
       AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
   int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
-  int RSrcOpName = (Desc.TSFlags & SIInstrFlags::MIMG) ? AMDGPU::OpName::srsrc
-                                                       : AMDGPU::OpName::rsrc;
+  AMDGPU::OpName RSrcOpName = (Desc.TSFlags & SIInstrFlags::MIMG)
+                                  ? AMDGPU::OpName::srsrc
+                                  : AMDGPU::OpName::rsrc;
   int SrsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RSrcOpName);
   int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
   int A16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::a16);
@@ -4651,7 +4652,7 @@ bool AMDGPUAsmParser::validateOpSel(const MCInst &Inst) {
   return true;
 }
 
-bool AMDGPUAsmParser::validateNeg(const MCInst &Inst, int OpName) {
+bool AMDGPUAsmParser::validateNeg(const MCInst &Inst, AMDGPU::OpName OpName) {
   assert(OpName == AMDGPU::OpName::neg_lo || OpName == AMDGPU::OpName::neg_hi);
 
   const unsigned Opc = Inst.getOpcode();
@@ -4676,9 +4677,9 @@ bool AMDGPUAsmParser::validateNeg(const MCInst &Inst, int OpName) {
   // It is convenient that such instructions don't have src_modifiers operand
   // for src operands that don't allow neg because they also don't allow opsel.
 
-  int SrcMods[3] = {AMDGPU::OpName::src0_modifiers,
-                    AMDGPU::OpName::src1_modifiers,
-                    AMDGPU::OpName::src2_modifiers};
+  const AMDGPU::OpName SrcMods[3] = {AMDGPU::OpName::src0_modifiers,
+                                     AMDGPU::OpName::src1_modifiers,
+                                     AMDGPU::OpName::src2_modifiers};
 
   for (unsigned i = 0; i < 3; ++i) {
     if (!AMDGPU::hasNamedOperand(Opc, SrcMods[i])) {
@@ -4805,9 +4806,9 @@ bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
 }
 
 // Returns -1 if not a register, 0 if VGPR and 1 if AGPR.
-static int IsAGPROperand(const MCInst &Inst, uint16_t NameIdx,
+static int IsAGPROperand(const MCInst &Inst, AMDGPU::OpName Name,
                          const MCRegisterInfo *MRI) {
-  int OpIdx = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), NameIdx);
+  int OpIdx = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), Name);
   if (OpIdx < 0)
     return -1;
 
@@ -4828,12 +4829,13 @@ bool AMDGPUAsmParser::validateAGPRLdSt(const MCInst &Inst) const {
                   SIInstrFlags::DS)) == 0)
     return true;
 
-  uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
-                                                      : AMDGPU::OpName::vdata;
+  AMDGPU::OpName DataName = (TSFlags & SIInstrFlags::DS)
+                                ? AMDGPU::OpName::data0
+                                : AMDGPU::OpName::vdata;
 
   const MCRegisterInfo *MRI = getMRI();
   int DstAreg = IsAGPROperand(Inst, AMDGPU::OpName::vdst, MRI);
-  int DataAreg = IsAGPROperand(Inst, DataNameIdx, MRI);
+  int DataAreg = IsAGPROperand(Inst, DataName, MRI);
 
   if ((TSFlags & SIInstrFlags::DS) && DataAreg >= 0) {
     int Data2Areg = IsAGPROperand(Inst, AMDGPU::OpName::data1, MRI);
@@ -8647,9 +8649,8 @@ static void cvtVOP3DstOpSelOnly(MCInst &Inst, const MCRegisterInfo &MRI) {
     return;
 
   int SrcNum;
-  const int Ops[] = { AMDGPU::OpName::src0,
-                      AMDGPU::OpName::src1,
-                      AMDGPU::OpName::src2 };
+  const AMDGPU::OpName Ops[] = {AMDGPU::OpName::src0, AMDGPU::OpName::src1,
+                                AMDGPU::OpName::src2};
   for (SrcNum = 0; SrcNum < 3 && AMDGPU::hasNamedOperand(Opc, Ops[SrcNum]);
        ++SrcNum)
     ;
@@ -8771,12 +8772,11 @@ void AMDGPUAsmParser::cvtVINTERP(MCInst &Inst, const OperandVector &Operands)
   if (OpSelIdx == -1)
     return;
 
-  const int Ops[] = { AMDGPU::OpName::src0,
-                      AMDGPU::OpName::src1,
-                      AMDGPU::OpName::src2 };
-  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
-                         AMDGPU::OpName::src1_modifiers,
-                         AMDGPU::OpName::src2_modifiers };
+  const AMDGPU::OpName Ops[] = {AMDGPU::OpName::src0, AMDGPU::OpName::src1,
+                                AMDGPU::OpName::src2};
+  const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers,
+                                   AMDGPU::OpName::src1_modifiers,
+                                   AMDGPU::OpName::src2_modifiers};
 
   unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
 
@@ -8912,12 +8912,11 @@ void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands,
   if (NegHiIdx != -1)
     addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
 
-  const int Ops[] = { AMDGPU::OpName::src0,
-                      AMDGPU::OpName::src1,
-                      AMDGPU::OpName::src2 };
-  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
-                         AMDGPU::OpName::src1_modifiers,
-                         AMDGPU::OpName::src2_modifiers };
+  const AMDGPU::OpName Ops[] = {AMDGPU::OpName::src0, AMDGPU::OpName::src1,
+                                AMDGPU::OpName::src2};
+  const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers,
+                                   AMDGPU::OpName::src1_modifiers,
+                                   AMDGPU::OpName::src2_modifiers};
 
   unsigned OpSel = 0;
   unsigned OpSelHi = 0;
@@ -8980,7 +8979,8 @@ void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) {
 }
 
 static void addSrcModifiersAndSrc(MCInst &Inst, const OperandVector &Operands,
-                                  unsigned i, unsigned Opc, unsigned OpName) {
+                                  unsigned i, unsigned Opc,
+                                  AMDGPU::OpName OpName) {
   if (AMDGPU::getNamedOperandIdx(Opc, OpName) != -1)
     ((AMDGPUOperand &)*Operands[i]).addRegOrImmWithFPInputModsOperands(Inst, 2);
   else
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index 58cdbe6cf373ed..5d45c046af0fb2 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -75,8 +75,8 @@ addOperand(MCInst &Inst, const MCOperand& Opnd) {
 }
 
 static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
-                                uint16_t NameIdx) {
-  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
+                                AMDGPU::OpName Name) {
+  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), Name);
   if (OpIdx != -1) {
     auto *I = MI.begin();
     std::advance(I, OpIdx);
@@ -423,10 +423,11 @@ static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
     // are also tied.
     unsigned Opc = Inst.getOpcode();
     uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
-    uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
-                                                        : AMDGPU::OpName::vdata;
+    AMDGPU::OpName DataName = (TSFlags & SIInstrFlags::DS)
+                                  ? AMDGPU::OpName::data0
+                                  : AMDGPU::OpName::vdata;
     const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
-    int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
+    int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataName);
     if ((int)Inst.getNumOperands() == DataIdx) {
       int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
       if (IsAGPROperand(Inst, DstIdx, MRI))
@@ -922,9 +923,9 @@ static VOPModifiers collectVOPModifiers(const MCInst &MI,
                                         bool IsVOP3P = false) {
   VOPModifiers Modifiers;
   unsigned Opc = MI.getOpcode();
-  const int ModOps[] = {AMDGPU::OpName::src0_modifiers,
-                        AMDGPU::OpName::src1_modifiers,
-                        AMDGPU::OpName::src2_modifiers};
+  const AMDGPU::OpName ModOps[] = {AMDGPU::OpName::src0_modifiers,
+                                   AMDGPU::OpName::src1_modifiers,
+                                   AMDGPU::OpName::src2_modifiers};
   for (int J = 0; J < 3; ++J) {
     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
     if (OpIdx == -1)
@@ -951,15 +952,15 @@ void AMDGPUDisassembler::convertTrue16OpSel(MCInst &MI) const {
   const unsigned Opc = MI.getOpcode();
   const MCRegisterClass &ConversionRC =
       MRI.getRegClass(AMDGPU::VGPR_16RegClassID);
-  constexpr std::array<std::tuple<int, int, unsigned>, 4> OpAndOpMods = {
-      {{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
-        SISrcMods::OP_SEL_0},
-       {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
-        SISrcMods::OP_SEL_0},
-       {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
-        SISrcMods::OP_SEL_0},
-       {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
-        SISrcMods::DST_OP_SEL}}};
+  constexpr std::array<std::tuple<AMDGPU::OpName, AMDGPU::OpName, unsigned>, 4>
+      OpAndOpMods = {{{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
+                       SISrcMods::OP_SEL_0},
+                      {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
+                       SISrcMods::OP_SEL_0},
+                      {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
+                       SISrcMods::OP_SEL_0},
+                      {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
+                       SISrcMods::DST_OP_SEL}}};
   for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) {
     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, OpName);
     int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName);
@@ -1069,8 +1070,9 @@ void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
                                             AMDGPU::OpName::vdata);
   int VAddr0Idx =
       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
-  int RsrcOpName = (TSFlags & SIInstrFlags::MIMG) ? AMDGPU::OpName::srsrc
-                                                  : AMDGPU::OpName::rsrc;
+  AMDGPU::OpName RsrcOpName = (TSFlags & SIInstrFlags::MIMG)
+                                  ? AMDGPU::OpName::srsrc
+                                  : AMDGPU::OpName::rsrc;
   int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
   int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::dmask);
diff --git a/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp b/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp
index cc802b5fbb67c1..b22babb4a00d8f 100644
--- a/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNDPPCombine.cpp
@@ -70,9 +70,7 @@ class GCNDPPCombine {
                               RegSubRegPair CombOldVGPR, bool CombBCZ,
                               bool IsShrinkable) const;
 
-  bool hasNoImmOrEqual(MachineInstr &MI,
-                       unsigned OpndName,
-                       int64_t Value,
+  bool hasNoImmOrEqual(MachineInstr &MI, AMDGPU::OpName OpndName, int64_t Value,
                        int64_t Mask = -1) const;
 
   bool combineDPPMov(MachineInstr &MI) const;
@@ -513,7 +511,7 @@ MachineInstr *GCNDPPCombine::createDPPInst(
 
 // returns true if MI doesn't have OpndName immediate operand or the
 // operand has Value
-bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName,
+bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, AMDGPU::OpName OpndName,
                                     int64_t Value, int64_t Mask) const {
   auto *Imm = TII->getNamedOperand(MI, OpndName);
   if (!Imm)
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index a21702af11a984..e0850d1033e111 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -1310,7 +1310,7 @@ bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) {
   if (!SIInstrInfo::isVALU(*MI))
     return false;
 
-  unsigned SDSTName;
+  AMDGPU::OpName SDSTName;
   switch (MI->getOpcode()) {
   case AMDGPU::V_READLANE_B32:
   case AMDGPU::V_READFIRSTLANE_B32:
diff --git a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp
index ed9c48ff9c4de4..d0043bcc920b68 100644
--- a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp
+++ b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.cpp
@@ -308,7 +308,7 @@ bool AMDGPUCustomBehaviour::isVMEM(const MCInstrDesc &MCID) {
 
 // taken from SIInstrInfo::hasModifiersSet()
 bool AMDGPUCustomBehaviour::hasModifiersSet(
-    const std::unique_ptr<Instruction> &Inst, unsigned OpName) const {
+    const std::unique_ptr<Instruction> &Inst, AMDGPU::OpName OpName) const {
   int Idx = AMDGPU::getNamedOperandIdx(Inst->getOpcode(), OpName);
   if (Idx == -1)
     return false;
diff --git a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h
index 3a231758887ba6..85b9c188b5d1a7 100644
--- a/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h
+++ b/llvm/lib/Target/AMDGPU/MCA/AMDGPUCustomBehaviour.h
@@ -17,6 +17,7 @@
 #ifndef LLVM_LIB_TARGET_AMDGPU_MCA_AMDGPUCUSTOMBEHAVIOUR_H
 #define LLVM_LIB_TARGET_AMDGPU_MCA_AMDGPUCUSTOMBEHAVIOUR_H
 
+#include "Utils/AMDGPUBaseInfo.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/MCA/CustomBehaviour.h"
 #include "llvm/TargetParser/TargetParser.h"
@@ -66,7 +67,7 @@ class AMDGPUCustomBehaviour : public CustomBehaviour {
   void generateWaitCntInfo();
   /// Helper function used in generateWaitCntInfo()
   bool hasModifiersSet(const std::unique_ptr<Instruction> &Inst,
-                       unsigned OpName) const;
+                       AMDGPU::OpName OpName) const;
   /// Helper function used in generateWaitCntInfo()
   bool isGWS(uint16_t Opcode) const;
   /// Helper function used in generateWaitCntInfo()
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index c389f3a13d9524..381841f1428553 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -1205,7 +1205,7 @@ void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI,
   int NumOps = 0;
   int Ops[3];
 
-  std::pair<int, int> MOps[] = {
+  std::pair<AMDGPU::OpName, AMDGPU::OpName> MOps[] = {
       {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src0},
       {AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src1},
       {AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::src2}};
@@ -1226,7 +1226,7 @@ void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI,
       MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::IsWMMA) {
     NumOps = 0;
     int DefaultValue = Mod == SISrcMods::OP_SEL_1;
-    for (int OpName :
+    for (AMDGPU::OpName OpName :
          {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
           AMDGPU::OpName::src2_modifiers}) {
       int Idx = AMDGPU::getNamedOperandIdx(Opc, OpName);
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
index b9a424bb1d0590..1391ef6dd09e54 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -340,14 +340,13 @@ AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
 
 uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
   using namespace AMDGPU::VOP3PEncoding;
-  using namespace AMDGPU::OpName;
 
-  if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
-    if (AMDGPU::hasNamedOperand(Opcode, src2))
+  if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel_hi)) {
+    if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2))
       return 0;
-    if (AMDGPU::hasNamedOperand(Opcode, src1))
+    if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1))
       return OP_SEL_HI_2;
-    if (AMDGPU::hasNamedOperand(Opcode, src0))
+    if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0))
       return OP_SEL_HI_1 | OP_SEL_HI_2;
   }
   return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
index 879dbe1b279b18..9c0b2da0fcb0a2 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
@@ -50,7 +50,6 @@ createAMDGPUELFObjectWriter(bool Is64Bit, uint8_t OSABI,
 #include "AMDGPUGenRegisterInfo.inc"
 
 #define GET_INSTRINFO_ENUM
-#define GET_INSTRINFO_OPERAND_ENUM
 #define GET_INSTRINFO_MC_HELPER_DECLS
 #include "AMDGPUGenInstrInfo.inc"
 
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h
index cf40e7eccb5d24..20f2cb826ac4b1 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/R600MCTargetDesc.h
@@ -32,7 +32,6 @@ MCInstrInfo *createR600MCInstrInfo();
 #include "R600GenRegisterInfo.inc"
 
 #define GET_INSTRINFO_ENUM
-#define GET_INSTRINFO_OPERAND_ENUM
 #define GET_INSTRINFO_SCHED_ENUM
 #define GET_INSTRINFO_MC_HELPER_DECLS
 #include "R600GenInstrInfo.inc"
diff --git a/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp b/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
index ef2d049f917526..429ce0e0857acb 100644
--- a/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
@@ -31,7 +31,7 @@ class R600ExpandSpecialInstrsPass : public MachineFunctionPass {
   const R600InstrInfo *TII = nullptr;
 
   void SetFlagInNewMI(MachineInstr *NewMI, const MachineInstr *OldMI,
-      unsigned Op);
+                      R600::OpName Op);
 
 public:
   static char ID;
@@ -61,7 +61,8 @@ FunctionPass *llvm::createR600ExpandSpecialInstrsPass() {
 }
 
 void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
-    const MachineInstr *OldMI, unsigned Op) {
+                                                 const MachineInstr *OldMI,
+                                                 R600::OpName Op) {
   int OpIdx = TII->getOperandIdx(*OldMI, Op);
   if (OpIdx > -1) {
     uint64_t Val = OldMI->getOperand(OpIdx).getImm();
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index f95649db2942ec..1c4a992c872716 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -222,19 +222,18 @@ bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
 }
 
 int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
-  static const unsigned SrcSelTable[][2] = {
-    {R600::OpName::src0, R600::OpName::src0_sel},
-    {R600::OpName::src1, R600::OpName::src1_sel},
-    {R600::OpName::src2, R600::OpName::src2_sel},
-    {R600::OpName::src0_X, R600::OpName::src0_sel_X},
-    {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
-    {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
-    {R600::OpName::src0_W, R600::OpName::src0_sel_W},
-    {R600::OpName::src1_X, R600::OpName::src1_sel_X},
-    {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
-    {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
-    {R600::OpName::src1_W, R600::OpName::src1_sel_W}
-  };
+  static const R600::OpName SrcSelTable[][2] = {
+      {R600::OpName::src0, R600::OpName::src0_sel},
+      {R600::OpName::src1, R600::OpName::src1_sel},
+      {R600::OpName::src2, R600::OpName::src2_sel},
+      {R600::OpName::src0_X, R600::OpName::src0_sel_X},
+      {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
+      {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
+      {R600::OpName::src0_W, R600::OpName::src0_sel_W},
+      {R600::OpName::src1_X, R600::OpName::src1_sel_X},
+      {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
+      {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
+      {R600::OpName::src1_W, R600::OpName::src1_sel_W}};
 
   for (const auto &Row : SrcSelTable) {
     if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
@@ -249,15 +248,15 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const {
   SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
 
   if (MI.getOpcode() == R600::DOT_4) {
-    static const unsigned OpTable[8][2] = {
-      {R600::OpName::src0_X, R600::OpName::src0_sel_X},
-      {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
-      {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
-      {R600::OpName::src0_W, R600::OpName::src0_sel_W},
-      {R600::OpName::src1_X, R600::OpName::src1_sel_X},
-      {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
-      {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
-      {R600::OpName::src1_W, R600::OpName::src1_sel_W},
+    static const R600::OpName OpTable[8][2] = {
+        {R600::OpName::src0_X, R600::OpName::src0_sel_X},
+        {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
+        {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
+        {R600::OpName::src0_W, R600::OpName::src0_sel_W},
+        {R600::OpName::src1_X, R600::OpName::src1_sel_X},
+        {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
+        {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
+        {R600::OpName::src1_W, R600::OpName::src1_sel_W},
     };
 
     for (const auto &Op : OpTable) {
@@ -273,10 +272,10 @@ R600InstrInfo::getSrcs(MachineInstr &MI) const {
     return Result;
   }
 
-  static const unsigned OpTable[3][2] = {
-    {R600::OpName::src0, R600::OpName::src0_sel},
-    {R600::OpName::src1, R600::OpName::src1_sel},
-    {R600::OpName::src2, R600::OpName::src2_sel},
+  static const R600::OpName OpTable[3][2] = {
+      {R600::OpName::src0, R600::OpName::src0_sel},
+      {R600::OpName::src1, R600::OpName::src1_sel},
+      {R600::OpName::src2, R600::OpName::src2_sel},
   };
 
   for (const auto &Op : OpTable) {
@@ -1238,19 +1237,14 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB
   return MIB;
 }
 
-#define OPERAND_CASE(Label) \
-  case Label: { \
-    static const unsigned Ops[] = \
-    { \
-      Label##_X, \
-      Label##_Y, \
-      Label##_Z, \
-      Label##_W \
-    }; \
-    return Ops[Slot]; \
+#define OPERAND_CASE(Label)                                                    \
+  case Label: {                                                                \
+    static const R600::OpName Ops[] = {Label##_X, Label##_Y, Label##_Z,        \
+                                       Label##_W};                             \
+    return Ops[Slot];                                                          \
   }
 
-static unsigned getSlotedOps(unsigned  Op, unsigned Slot) {
+static R600::OpName getSlotedOps(R600::OpName Op, unsigned Slot) {
   switch (Op) {
   OPERAND_CASE(R600::OpName::update_exec_mask)
   OPERAND_CASE(R600::OpName::update_pred)
@@ -1292,21 +1286,21 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
       getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src1, Slot)));
   MachineInstr *MIB = buildDefaultInstruction(
       MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
-  static const unsigned  Operands[14] = {
-    R600::OpName::update_exec_mask,
-    R600::OpName::update_pred,
-    R600::OpName::write,
-    R600::OpName::omod,
-    R600::OpName::dst_rel,
-    R600::OpName::clamp,
-    R600::OpName::src0_neg,
-    R600::OpName::src0_rel,
-    R600::OpName::src0_abs,
-    R600::OpName::src0_sel,
-    R600::OpName::src1_neg,
-    R600::OpName::src1_rel,
-    R600::OpName::src1_abs,
-    R600::OpName::src1_sel,
+  static const R600::OpName Operands[14] = {
+      R600::OpName::update_exec_mask,
+      R600::OpName::update_pred,
+      R600::OpName::write,
+      R600::OpName::omod,
+      R600::OpName::dst_rel,
+      R600::OpName::clamp,
+      R600::OpName::src0_neg,
+      R600::OpName::src0_rel,
+      R600::OpName::src0_abs,
+      R600::OpName::src0_sel,
+      R600::OpName::src1_neg,
+      R600::OpName::src1_rel,
+      R600::OpName::src1_abs,
+      R600::OpName::src1_sel,
   };
 
   MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
@@ -1314,7 +1308,7 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
   MIB->getOperand(getOperandIdx(Opcode, R600::OpName::pred_sel))
       .setReg(MO.getReg());
 
-  for (unsigned Operand : Operands) {
+  for (R600::OpName Operand : Operands) {
     MachineOperand &MO = MI->getOperand(
         getOperandIdx(MI->getOpcode(), getSlotedOps(Operand, Slot)));
     assert (MO.isImm());
@@ -1340,15 +1334,16 @@ MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
   return buildDefaultInstruction(*MBB, I, R600::MOV, DstReg, SrcReg);
 }
 
-int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
+int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
+                                 R600::OpName Op) const {
   return getOperandIdx(MI.getOpcode(), Op);
 }
 
-int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
+int R600InstrInfo::getOperandIdx(unsigned Opcode, R600::OpName Op) const {
   return R600::getNamedOperandIdx(Opcode, Op);
 }
 
-void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
+void R600InstrInfo::setImmOperand(MachineInstr &MI, R600::OpName Op,
                                   int64_t Imm) const {
   int Idx = getOperandIdx(MI, Op);
   assert(Idx != -1 && "Operand not supported for this instruction.");
@@ -1425,37 +1420,37 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
   return FlagOp;
 }
 
-void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
+void R600InstrInfo::addFlag(MachineInstr &MI, unsigned SrcIdx,
                             unsigned Flag) const {
   unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
   if (Flag == 0) {
     return;
   }
   if (HAS_NATIVE_OPERANDS(TargetFlags)) {
-    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
+    MachineOperand &FlagOp = getFlagOp(MI, SrcIdx, Flag);
     if (Flag == MO_FLAG_NOT_LAST) {
-      clearFlag(MI, Operand, MO_FLAG_LAST);
+      clearFlag(MI, SrcIdx, MO_FLAG_LAST);
     } else if (Flag == MO_FLAG_MASK) {
-      clearFlag(MI, Operand, Flag);
+      clearFlag(MI, SrcIdx, Flag);
     } else {
       FlagOp.setImm(1);
     }
   } else {
-      MachineOperand &FlagOp = getFlagOp(MI, Operand);
-      FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
+    MachineOperand &FlagOp = getFlagOp(MI, SrcIdx);
+    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * SrcIdx)));
   }
 }
 
-void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
+void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned SrcIdx,
                               unsigned Flag) const {
   unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
   if (HAS_NATIVE_OPERANDS(TargetFlags)) {
-    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
+    MachineOperand &FlagOp = getFlagOp(MI, SrcIdx, Flag);
     FlagOp.setImm(0);
   } else {
     MachineOperand &FlagOp = getFlagOp(MI);
     unsigned InstFlags = FlagOp.getImm();
-    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
+    InstFlags &= ~(Flag << (NUM_MO_FLAGS * SrcIdx));
     FlagOp.setImm(InstFlags);
   }
 }
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.h b/llvm/lib/Target/AMDGPU/R600InstrInfo.h
index c767ecb24590b3..a403c65141fed5 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.h
@@ -18,6 +18,7 @@
 #include "llvm/CodeGen/TargetInstrInfo.h"
 
 #define GET_INSTRINFO_HEADER
+#define GET_INSTRINFO_OPERAND_ENUM
 #include "R600GenInstrInfo.inc"
 
 namespace llvm {
@@ -287,21 +288,21 @@ class R600InstrInfo final : public R600GenInstrInfo {
   /// Get the index of Op in the MachineInstr.
   ///
   /// \returns -1 if the Instruction does not contain the specified \p Op.
-  int getOperandIdx(const MachineInstr &MI, unsigned Op) const;
+  int getOperandIdx(const MachineInstr &MI, R600::OpName Op) const;
 
   /// Get the index of \p Op for the given Opcode.
   ///
   /// \returns -1 if the Instruction does not contain the specified \p Op.
-  int getOperandIdx(unsigned Opcode, unsigned Op) const;
+  int getOperandIdx(unsigned Opcode, R600::OpName Op) const;
 
   /// Helper function for setting instruction flag values.
-  void setImmOperand(MachineInstr &MI, unsigned Op, int64_t Imm) const;
+  void setImmOperand(MachineInstr &MI, R600::OpName Op, int64_t Imm) const;
 
-  ///Add one of the MO_FLAG* flags to the specified \p Operand.
-  void addFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const;
+  /// Add one of the MO_FLAG* flags to the operand at \p SrcIdx.
+  void addFlag(MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const;
 
-  ///Determine if the specified \p Flag is set on this \p Operand.
-  bool isFlagSet(const MachineInstr &MI, unsigned Operand, unsigned Flag) const;
+  /// Determine if the specified \p Flag is set on operand at \p SrcIdx.
+  bool isFlagSet(const MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const;
 
   /// \param SrcIdx The register source to set the flag on (e.g src0, src1, src2)
   /// \param Flag The flag being set.
@@ -311,7 +312,7 @@ class R600InstrInfo final : public R600GenInstrInfo {
                             unsigned Flag = 0) const;
 
   /// Clear the specified flag on the instruction.
-  void clearFlag(MachineInstr &MI, unsigned Operand, unsigned Flag) const;
+  void clearFlag(MachineInstr &MI, unsigned SrcIdx, unsigned Flag) const;
 
   // Helper functions that check the opcode for status information
   bool isRegisterStore(const MachineInstr &MI) const {
diff --git a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp
index 28bf6e33384d2e..c1ed176ed29d21 100644
--- a/llvm/lib/Target/AMDGPU/R600Packetizer.cpp
+++ b/llvm/lib/Target/AMDGPU/R600Packetizer.cpp
@@ -122,12 +122,9 @@ class R600PacketizerList : public VLIWPacketizerList {
 
   void substitutePV(MachineInstr &MI, const DenseMap<unsigned, unsigned> &PVs)
       const {
-    unsigned Ops[] = {
-      R600::OpName::src0,
-      R600::OpName::src1,
-      R600::OpName::src2
-    };
-    for (unsigned Op : Ops) {
+    const R600::OpName Ops[] = {R600::OpName::src0, R600::OpName::src1,
+                                R600::OpName::src2};
+    for (R600::OpName Op : Ops) {
       int OperandIdx = TII->getOperandIdx(MI.getOpcode(), Op);
       if (OperandIdx < 0)
         continue;
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 2bc19137b1ca0f..d8f3f9c54abc17 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -369,20 +369,20 @@ bool SIFoldOperandsImpl::tryFoldImmWithOpSel(FoldCandidate &Fold) const {
 
   // Refer to op_sel/op_sel_hi and check if we can change the immediate and
   // op_sel in a way that allows an inline constant.
-  int ModIdx = -1;
+  AMDGPU::OpName ModName = AMDGPU::OpName::NUM_OPERAND_NAMES;
   unsigned SrcIdx = ~0;
   if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0)) {
-    ModIdx = AMDGPU::OpName::src0_modifiers;
+    ModName = AMDGPU::OpName::src0_modifiers;
     SrcIdx = 0;
   } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1)) {
-    ModIdx = AMDGPU::OpName::src1_modifiers;
+    ModName = AMDGPU::OpName::src1_modifiers;
     SrcIdx = 1;
   } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2)) {
-    ModIdx = AMDGPU::OpName::src2_modifiers;
+    ModName = AMDGPU::OpName::src2_modifiers;
     SrcIdx = 2;
   }
-  assert(ModIdx != -1);
-  ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
+  assert(ModName != AMDGPU::OpName::NUM_OPERAND_NAMES);
+  int ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModName);
   MachineOperand &Mod = MI->getOperand(ModIdx);
   unsigned ModVal = Mod.getImm();
 
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 3d6419778f4b1c..ee263f58bcaf2d 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1139,7 +1139,7 @@ FunctionPass *llvm::createSIInsertWaitcntsPass() {
   return new SIInsertWaitcnts();
 }
 
-static bool updateOperandIfDifferent(MachineInstr &MI, uint16_t OpName,
+static bool updateOperandIfDifferent(MachineInstr &MI, AMDGPU::OpName OpName,
                                      unsigned NewEnc) {
   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
   assert(OpIdx >= 0);
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 35667801c809d5..baacb5d3d54551 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -79,7 +79,8 @@ static unsigned getNumOperandsNoGlue(SDNode *Node) {
 
 /// Returns true if both nodes have the same value for the given
 ///        operand \p Op, or if both nodes do not have this operand.
-static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
+static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1,
+                                      AMDGPU::OpName OpName) {
   unsigned Opc0 = N0->getMachineOpcode();
   unsigned Opc1 = N1->getMachineOpcode();
 
@@ -2701,11 +2702,10 @@ SIInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   return std::nullopt;
 }
 
-bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
-                                      MachineOperand &Src0,
-                                      unsigned Src0OpName,
+bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0,
+                                      AMDGPU::OpName Src0OpName,
                                       MachineOperand &Src1,
-                                      unsigned Src1OpName) const {
+                                      AMDGPU::OpName Src1OpName) const {
   MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
   if (!Src0Mods)
     return false;
@@ -3432,14 +3432,14 @@ bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
   }
 }
 
-static constexpr unsigned ModifierOpNames[] = {
+static constexpr AMDGPU::OpName ModifierOpNames[] = {
     AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
     AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp,
     AMDGPU::OpName::omod,           AMDGPU::OpName::op_sel};
 
 void SIInstrInfo::removeModOperands(MachineInstr &MI) const {
   unsigned Opc = MI.getOpcode();
-  for (unsigned Name : reverse(ModifierOpNames)) {
+  for (AMDGPU::OpName Name : reverse(ModifierOpNames)) {
     int Idx = AMDGPU::getNamedOperandIdx(Opc, Name);
     if (Idx >= 0)
       MI.removeOperand(Idx);
@@ -4494,14 +4494,14 @@ bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
 }
 
 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
-                                  unsigned OpName) const {
+                                  AMDGPU::OpName OpName) const {
   const MachineOperand *Mods = getNamedOperand(MI, OpName);
   return Mods && Mods->getImm();
 }
 
 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
   return any_of(ModifierOpNames,
-                [&](unsigned Name) { return hasModifiersSet(MI, Name); });
+                [&](AMDGPU::OpName Name) { return hasModifiersSet(MI, Name); });
 }
 
 bool SIInstrInfo::canShrink(const MachineInstr &MI,
@@ -5268,7 +5268,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
     if (DimOp) {
       int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
                                                  AMDGPU::OpName::vaddr0);
-      int RSrcOpName =
+      AMDGPU::OpName RSrcOpName =
           isMIMG(MI) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
       int RsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, RSrcOpName);
       const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
@@ -5373,9 +5373,9 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
 
   if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) {
     const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
-    uint16_t DataNameIdx = isDS(Opcode) ? AMDGPU::OpName::data0
-                                        : AMDGPU::OpName::vdata;
-    const MachineOperand *Data = getNamedOperand(MI, DataNameIdx);
+    AMDGPU::OpName DataName =
+        isDS(Opcode) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata;
+    const MachineOperand *Data = getNamedOperand(MI, DataName);
     const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1);
     if (Data && !Data->isReg())
       Data = nullptr;
@@ -5405,7 +5405,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
   }
 
   if (ST.needsAlignedVGPRs()) {
-    const auto isAlignedReg = [&MI, &MRI, this](unsigned OpName) -> bool {
+    const auto isAlignedReg = [&MI, &MRI, this](AMDGPU::OpName OpName) -> bool {
       const MachineOperand *Op = getNamedOperand(MI, OpName);
       if (!Op)
         return true;
@@ -6890,13 +6890,15 @@ SIInstrInfo::legalizeOperands(MachineInstr &MI,
   // conversion to the addr64 form.
   if (isImage(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) &&
                       (isMUBUF(MI) || isMTBUF(MI)))) {
-    int RSrcOpName = (isVIMAGE(MI) || isVSAMPLE(MI)) ? AMDGPU::OpName::rsrc
-                                                     : AMDGPU::OpName::srsrc;
+    AMDGPU::OpName RSrcOpName = (isVIMAGE(MI) || isVSAMPLE(MI))
+                                    ? AMDGPU::OpName::rsrc
+                                    : AMDGPU::OpName::srsrc;
     MachineOperand *SRsrc = getNamedOperand(MI, RSrcOpName);
     if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
       CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {SRsrc}, MDT);
 
-    int SampOpName = isMIMG(MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp;
+    AMDGPU::OpName SampOpName =
+        isMIMG(MI) ? AMDGPU::OpName::ssamp : AMDGPU::OpName::samp;
     MachineOperand *SSamp = getNamedOperand(MI, SampOpName);
     if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
       CreatedBB = loadMBUFScalarOperandsFromVGPR(*this, MI, {SSamp}, MDT);
@@ -8792,7 +8794,7 @@ Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
 }
 
 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
-                                             unsigned OperandName) const {
+                                             AMDGPU::OpName OperandName) const {
   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
   if (Idx == -1)
     return nullptr;
@@ -10109,7 +10111,7 @@ bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
 }
 
 void SIInstrInfo::enforceOperandRCAlignment(MachineInstr &MI,
-                                            unsigned OpName) const {
+                                            AMDGPU::OpName OpName) const {
   if (!ST.needsAlignedVGPRs())
     return;
 
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 933935a86f9f98..6b0de138251aba 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -190,9 +190,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   std::optional<DestSourcePair>
   isCopyInstrImpl(const MachineInstr &MI) const override;
 
-  bool swapSourceModifiers(MachineInstr &MI,
-                           MachineOperand &Src0, unsigned Src0OpName,
-                           MachineOperand &Src1, unsigned Src1OpName) const;
+  bool swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0,
+                           AMDGPU::OpName Src0OpName, MachineOperand &Src1,
+                           AMDGPU::OpName Src1OpName) const;
   bool isLegalToSwap(const MachineInstr &MI, unsigned fromIdx,
                      const MachineOperand *fromMO, unsigned toIdx,
                      const MachineOperand *toMO) const;
@@ -1137,8 +1137,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   ///  e.g. src[012]_mod, omod, clamp.
   bool hasModifiers(unsigned Opcode) const;
 
-  bool hasModifiersSet(const MachineInstr &MI,
-                       unsigned OpName) const;
+  bool hasModifiersSet(const MachineInstr &MI, AMDGPU::OpName OpName) const;
   bool hasAnyModifiersSet(const MachineInstr &MI) const;
 
   bool canShrink(const MachineInstr &MI,
@@ -1294,17 +1293,19 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   /// Returns the operand named \p Op.  If \p MI does not have an
   /// operand named \c Op, this function returns nullptr.
   LLVM_READONLY
-  MachineOperand *getNamedOperand(MachineInstr &MI, unsigned OperandName) const;
+  MachineOperand *getNamedOperand(MachineInstr &MI,
+                                  AMDGPU::OpName OperandName) const;
 
   LLVM_READONLY
   const MachineOperand *getNamedOperand(const MachineInstr &MI,
-                                        unsigned OpName) const {
-    return getNamedOperand(const_cast<MachineInstr &>(MI), OpName);
+                                        AMDGPU::OpName OperandName) const {
+    return getNamedOperand(const_cast<MachineInstr &>(MI), OperandName);
   }
 
   /// Get required immediate operand
-  int64_t getNamedImmOperand(const MachineInstr &MI, unsigned OpName) const {
-    int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
+  int64_t getNamedImmOperand(const MachineInstr &MI,
+                             AMDGPU::OpName OperandName) const {
+    int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
     return MI.getOperand(Idx).getImm();
   }
 
@@ -1461,7 +1462,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   // Enforce operand's \p OpName even alignment if required by target.
   // This is used if an operand is a 32 bit register but needs to be aligned
   // regardless.
-  void enforceOperandRCAlignment(MachineInstr &MI, unsigned OpName) const;
+  void enforceOperandRCAlignment(MachineInstr &MI, AMDGPU::OpName OpName) const;
 };
 
 /// \brief Returns true if a reg:subreg pair P has a TRC class
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 904321a344db11..42eea365a78677 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -228,11 +228,11 @@ class SILoadStoreOptimizer {
   CombineInfo *checkAndPrepareMerge(CombineInfo &CI, CombineInfo &Paired);
 
   void copyToDestRegs(CombineInfo &CI, CombineInfo &Paired,
-                      MachineBasicBlock::iterator InsertBefore, int OpName,
-                      Register DestReg) const;
+                      MachineBasicBlock::iterator InsertBefore,
+                      AMDGPU::OpName OpName, Register DestReg) const;
   Register copyFromSrcRegs(CombineInfo &CI, CombineInfo &Paired,
                            MachineBasicBlock::iterator InsertBefore,
-                           int OpName) const;
+                           AMDGPU::OpName OpName) const;
 
   unsigned read2Opcode(unsigned EltSize) const;
   unsigned read2ST64Opcode(unsigned EltSize) const;
@@ -699,7 +699,7 @@ static AddressRegs getRegs(unsigned Opc, const SIInstrInfo &TII) {
   if (TII.isImage(Opc)) {
     int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
     if (VAddr0Idx >= 0) {
-      int RsrcName =
+      AMDGPU::OpName RsrcName =
           TII.isMIMG(Opc) ? AMDGPU::OpName::srsrc : AMDGPU::OpName::rsrc;
       int RsrcIdx = AMDGPU::getNamedOperandIdx(Opc, RsrcName);
       Result.NumVAddrs = RsrcIdx - VAddr0Idx;
@@ -968,11 +968,11 @@ bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI,
     return false;
 
   // Check other optional immediate operands for equality.
-  unsigned OperandsToMatch[] = {AMDGPU::OpName::cpol, AMDGPU::OpName::d16,
-                                AMDGPU::OpName::unorm, AMDGPU::OpName::da,
-                                AMDGPU::OpName::r128, AMDGPU::OpName::a16};
+  AMDGPU::OpName OperandsToMatch[] = {
+      AMDGPU::OpName::cpol, AMDGPU::OpName::d16,  AMDGPU::OpName::unorm,
+      AMDGPU::OpName::da,   AMDGPU::OpName::r128, AMDGPU::OpName::a16};
 
-  for (auto op : OperandsToMatch) {
+  for (AMDGPU::OpName op : OperandsToMatch) {
     int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op);
     if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx)
       return false;
@@ -1256,7 +1256,7 @@ SILoadStoreOptimizer::checkAndPrepareMerge(CombineInfo &CI,
 // Paired.
 void SILoadStoreOptimizer::copyToDestRegs(
     CombineInfo &CI, CombineInfo &Paired,
-    MachineBasicBlock::iterator InsertBefore, int OpName,
+    MachineBasicBlock::iterator InsertBefore, AMDGPU::OpName OpName,
     Register DestReg) const {
   MachineBasicBlock *MBB = CI.I->getParent();
   DebugLoc DL = CI.I->getDebugLoc();
@@ -1287,7 +1287,7 @@ void SILoadStoreOptimizer::copyToDestRegs(
 Register
 SILoadStoreOptimizer::copyFromSrcRegs(CombineInfo &CI, CombineInfo &Paired,
                                       MachineBasicBlock::iterator InsertBefore,
-                                      int OpName) const {
+                                      AMDGPU::OpName OpName) const {
   MachineBasicBlock *MBB = CI.I->getParent();
   DebugLoc DL = CI.I->getDebugLoc();
 
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
index 3fb8d5b560496b..920c3e11e47180 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
@@ -619,7 +619,7 @@ bool SIOptimizeExecMasking::optimizeVCMPSaveExecSequence(
                          VCmp.getDebugLoc(), TII->get(NewOpcode));
 
   auto TryAddImmediateValueFromNamedOperand =
-      [&](unsigned OperandName) -> void {
+      [&](AMDGPU::OpName OperandName) -> void {
     if (auto *Mod = TII->getNamedOperand(VCmp, OperandName))
       Builder.addImm(Mod->getImm());
   };
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index fad7e67ff3c763..0b363e0b55c92e 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -19,6 +19,11 @@
 #include <functional>
 #include <utility>
 
+// Pull in OpName enum definition.
+#define GET_INSTRINFO_OPERAND_ENUM
+#include "AMDGPUGenInstrInfo.inc"
+#undef GET_INSTRINFO_OPERAND_ENUM
+
 struct amd_kernel_code_t;
 
 namespace llvm {
@@ -394,10 +399,10 @@ template <typename... Fields> struct EncodingFields {
 };
 
 LLVM_READONLY
-int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx);
+int16_t getNamedOperandIdx(uint16_t Opcode, OpName NamedIdx);
 
 LLVM_READONLY
-inline bool hasNamedOperand(uint64_t Opcode, uint64_t NamedIdx) {
+inline bool hasNamedOperand(uint64_t Opcode, OpName NamedIdx) {
   return getNamedOperandIdx(Opcode, NamedIdx) != -1;
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 1c81719c767ecb..bca979adaa9a3a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -331,7 +331,7 @@ isRVVSpillForZvlsseg(unsigned Opcode);
 bool isFaultFirstLoad(const MachineInstr &MI);
 
 // Implemented in RISCVGenInstrInfo.inc
-int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
+int16_t getNamedOperandIdx(uint16_t Opcode, RISCV::OpName Name);
 
 // Return true if both input instructions have equal rounding mode. If at least
 // one of the instructions does not have rounding mode, false will be returned.
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
index 8cb692f9bc0c47..7ccf37044b231f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
@@ -29,8 +29,7 @@ namespace llvm {
 
 namespace WebAssembly {
 
-int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
-
+int16_t getNamedOperandIdx(uint16_t Opcode, OpName Name);
 }
 
 class WebAssemblySubtarget;
diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 97c00ad4924197..73c9aa3ae07009 100644
--- a/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -282,20 +282,20 @@ void InstrInfoEmitter::emitOperandNameMappings(
 
   OS << "#ifdef GET_INSTRINFO_OPERAND_ENUM\n";
   OS << "#undef GET_INSTRINFO_OPERAND_ENUM\n";
-  OS << "namespace llvm::" << Namespace << "::OpName {\n";
-  OS << "enum {\n";
+  OS << "namespace llvm::" << Namespace << " {\n";
+  OS << "enum class OpName {\n";
   for (const auto &[I, Op] : enumerate(OperandNameToID))
     OS << "  " << Op.first << " = " << I << ",\n";
-  OS << "  OPERAND_LAST = " << NumOperandNames << ",\n";
-  OS << "};\n";
-  OS << "} // end namespace llvm::" << Namespace << "::OpName\n";
+  OS << "  NUM_OPERAND_NAMES = " << NumOperandNames << ",\n";
+  OS << "}; // enum class OpName\n";
+  OS << "} // end namespace llvm::" << Namespace << '\n';
   OS << "#endif //GET_INSTRINFO_OPERAND_ENUM\n\n";
 
   OS << "#ifdef GET_INSTRINFO_NAMED_OPS\n";
   OS << "#undef GET_INSTRINFO_NAMED_OPS\n";
   OS << "namespace llvm::" << Namespace << " {\n";
   OS << "LLVM_READONLY\n";
-  OS << "int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {\n";
+  OS << "int16_t getNamedOperandIdx(uint16_t Opcode, OpName Name) {\n";
   if (NumOperandNames != 0) {
     assert(MaxOperandNo <= INT16_MAX &&
            "Too many operands for the operand name -> index table");
@@ -319,7 +319,8 @@ void InstrInfoEmitter::emitOperandNameMappings(
     for (const auto &[TableIndex, Entry] : enumerate(OperandMap)) {
       for (StringRef Name : Entry.second)
         OS << "  case " << Namespace << "::" << Name << ":\n";
-      OS << "    return OperandMap[" << TableIndex << "][NamedIdx];\n";
+      OS << "    return OperandMap[" << TableIndex
+         << "][static_cast<unsigned>(Name)];\n";
     }
     OS << "  default: return -1;\n";
     OS << "  }\n";

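For reference, a minimal self-contained sketch of the kind of lookup function
the updated emitter produces under GET_INSTRINFO_NAMED_OPS (the opcodes,
operand names, and table contents below are hypothetical placeholders; the
real tables are generated per target from the .td files):

  #include <cstdint>

  namespace llvm::XXX {

  enum class OpName { src0 = 0, src1 = 1, NUM_OPERAND_NAMES = 2 };

  int16_t getNamedOperandIdx(uint16_t Opcode, OpName Name) {
    // One row per group of instructions that share an operand layout;
    // -1 means the instruction has no operand with that name.
    static constexpr int16_t OperandMap[][2] = {
        {0, 1},  // hypothetical: instructions with both src0 and src1
        {0, -1}, // hypothetical: instructions with only src0
    };
    switch (Opcode) {
    case 100: // hypothetical opcodes mapped to table row 0
    case 101:
      return OperandMap[0][static_cast<unsigned>(Name)];
    case 102: // hypothetical opcode mapped to table row 1
      return OperandMap[1][static_cast<unsigned>(Name)];
    default:
      return -1;
    }
  }

  } // end namespace llvm::XXX

Call sites now pass the scoped enumerator instead of a raw integer, e.g.
int Idx = XXX::getNamedOperandIdx(Opc, XXX::OpName::src1);, which mirrors
the mechanical updates to the AMDGPU, RISCV, and WebAssembly backends above.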

