[llvm] 768aed1 - [MC] Make more use of MCInstrDesc::operands. NFC.
Jay Foad via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 23 03:32:00 PST 2023
Author: Jay Foad
Date: 2023-01-23T11:31:41Z
New Revision: 768aed13785b6a42cc3bcfe829efc231ba536909
URL: https://github.com/llvm/llvm-project/commit/768aed13785b6a42cc3bcfe829efc231ba536909
DIFF: https://github.com/llvm/llvm-project/commit/768aed13785b6a42cc3bcfe829efc231ba536909.diff
LOG: [MC] Make more use of MCInstrDesc::operands. NFC.
Change MCInstrDesc::operands to return an ArrayRef so we can easily use
it everywhere instead of the (IMHO ugly) opInfo_begin and opInfo_end.
A future patch will remove opInfo_begin and opInfo_end.
Also use it instead of raw access to the OpInfo pointer. A future patch
will remove this pointer.
Differential Revision: https://reviews.llvm.org/D142213
Added:
Modified:
llvm/include/llvm/MC/MCInstrDesc.h
llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
llvm/lib/CodeGen/MachineInstr.cpp
llvm/lib/CodeGen/MachineVerifier.cpp
llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
llvm/lib/CodeGen/TargetInstrInfo.cpp
llvm/lib/CodeGen/TargetSchedule.cpp
llvm/lib/MC/MCParser/AsmParser.cpp
llvm/lib/MC/MCParser/MasmParser.cpp
llvm/lib/MCA/InstrBuilder.cpp
llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
llvm/lib/Target/AMDGPU/SIInstrInfo.h
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp
llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
llvm/lib/Target/PowerPC/PPCInstrInfo.h
llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp
llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
llvm/lib/Target/X86/X86MCInstLower.cpp
llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/MC/MCInstrDesc.h b/llvm/include/llvm/MC/MCInstrDesc.h
index f4dd4763a9426..4aa568d345e0d 100644
--- a/llvm/include/llvm/MC/MCInstrDesc.h
+++ b/llvm/include/llvm/MC/MCInstrDesc.h
@@ -14,6 +14,7 @@
#ifndef LLVM_MC_MCINSTRDESC_H
#define LLVM_MC_MCINSTRDESC_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/MC/MCRegister.h"
@@ -212,9 +213,9 @@ class MCInstrDesc {
int getOperandConstraint(unsigned OpNum,
MCOI::OperandConstraint Constraint) const {
if (OpNum < NumOperands &&
- (OpInfo[OpNum].Constraints & (1 << Constraint))) {
+ (operands()[OpNum].Constraints & (1 << Constraint))) {
unsigned ValuePos = 4 + Constraint * 4;
- return (int)(OpInfo[OpNum].Constraints >> ValuePos) & 0x0f;
+ return (int)(operands()[OpNum].Constraints >> ValuePos) & 0x0f;
}
return -1;
}
@@ -234,8 +235,8 @@ class MCInstrDesc {
const_opInfo_iterator opInfo_begin() const { return OpInfo; }
const_opInfo_iterator opInfo_end() const { return OpInfo + NumOperands; }
- iterator_range<const_opInfo_iterator> operands() const {
- return make_range(opInfo_begin(), opInfo_end());
+ ArrayRef<MCOperandInfo> operands() const {
+ return ArrayRef(OpInfo, NumOperands);
}
/// Return the number of MachineOperands that are register
@@ -627,7 +628,7 @@ class MCInstrDesc {
int findFirstPredOperandIdx() const {
if (isPredicable()) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (OpInfo[i].isPredicate())
+ if (operands()[i].isPredicate())
return i;
}
return -1;
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 3940210ad5f2b..4b6c3a1567093 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -330,7 +330,7 @@ LegalizerInfo::getAction(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const {
SmallVector<LLT, 8> Types;
SmallBitVector SeenTypes(8);
- const MCOperandInfo *OpInfo = MI.getDesc().OpInfo;
+ ArrayRef<MCOperandInfo> OpInfo = MI.getDesc().operands();
// FIXME: probably we'll need to cache the results here somehow?
for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) {
if (!OpInfo[i].isGenericType())
@@ -379,14 +379,14 @@ void LegalizerInfo::verify(const MCInstrInfo &MII) const {
for (unsigned Opcode = FirstOp; Opcode <= LastOp; ++Opcode) {
const MCInstrDesc &MCID = MII.get(Opcode);
const unsigned NumTypeIdxs = std::accumulate(
- MCID.opInfo_begin(), MCID.opInfo_end(), 0U,
+ MCID.operands().begin(), MCID.operands().end(), 0U,
[](unsigned Acc, const MCOperandInfo &OpInfo) {
return OpInfo.isGenericType()
? std::max(OpInfo.getGenericTypeIndex() + 1U, Acc)
: Acc;
});
const unsigned NumImmIdxs = std::accumulate(
- MCID.opInfo_begin(), MCID.opInfo_end(), 0U,
+ MCID.operands().begin(), MCID.operands().end(), 0U,
[](unsigned Acc, const MCOperandInfo &OpInfo) {
return OpInfo.isGenericImm()
? std::max(OpInfo.getGenericImmIndex() + 1U, Acc)
diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp
index a8fdfa82978f0..27564c8b7dd17 100644
--- a/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/llvm/lib/CodeGen/MachineInstr.cpp
@@ -1086,7 +1086,7 @@ int MachineInstr::findFirstPredOperandIdx() const {
const MCInstrDesc &MCID = getDesc();
if (MCID.isPredicable()) {
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (MCID.OpInfo[i].isPredicate())
+ if (MCID.operands()[i].isPredicate())
return i;
}
@@ -1524,7 +1524,7 @@ LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
if (isVariadic() || OpIdx >= getNumExplicitOperands())
return MRI.getType(Op.getReg());
- auto &OpInfo = getDesc().OpInfo[OpIdx];
+ auto &OpInfo = getDesc().operands()[OpIdx];
if (!OpInfo.isGenericType())
return MRI.getType(Op.getReg());
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 31e6f4dfe1c60..ddd5a027c2cda 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -978,11 +978,11 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
SmallVector<LLT, 4> Types;
for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
I != E; ++I) {
- if (!MCID.OpInfo[I].isGenericType())
+ if (!MCID.operands()[I].isGenericType())
continue;
// Generic instructions specify type equality constraints between some of
// their operands. Make sure these are consistent.
- size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
+ size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
Types.resize(std::max(TypeIdx + 1, Types.size()));
const MachineOperand *MO = &MI->getOperand(I);
@@ -1987,7 +1987,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
// The first MCID.NumDefs operands must be explicit register defines
if (MONum < NumDefs) {
- const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
+ const MCOperandInfo &MCOI = MCID.operands()[MONum];
if (!MO->isReg())
report("Explicit definition must be a register", MO, MONum);
else if (!MO->isDef() && !MCOI.isOptionalDef())
@@ -1995,7 +1995,7 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < MCID.getNumOperands()) {
- const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
+ const MCOperandInfo &MCOI = MCID.operands()[MONum];
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end. Check non-variadic operands only.
bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index e39b2c26641b8..9e4abc9c9c673 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -218,7 +218,7 @@ void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
RC = VTRC;
}
- if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) {
+ if (!II.operands().empty() && II.operands()[i].isOptionalDef()) {
// Optional def must be a physical register.
VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
assert(VRBase.isPhysical());
@@ -304,7 +304,7 @@ InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
const MCInstrDesc &MCID = MIB->getDesc();
bool isOptDef = IIOpNum < MCID.getNumOperands() &&
- MCID.OpInfo[IIOpNum].isOptionalDef();
+ MCID.operands()[IIOpNum].isOptionalDef();
// If the instruction requires a register in a different class, create
// a new virtual register and copy the value into it, but first attempt to
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 79d5e2c78c645..71abab3b4628b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -1433,7 +1433,7 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
// of %noreg. When the OptionalDef is set to a valid register, we need to
// handle it in the same way as an ImplicitDef.
for (unsigned i = 0; i < MCID.getNumDefs(); ++i)
- if (MCID.OpInfo[i].isOptionalDef()) {
+ if (MCID.operands()[i].isOptionalDef()) {
const SDValue &OptionalDef = Node->getOperand(i - Node->getNumValues());
Register Reg = cast<RegisterSDNode>(OptionalDef)->getReg();
CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI);
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index fdbb42f014803..0f6cf11ca9d17 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -49,8 +49,8 @@ TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
if (OpNum >= MCID.getNumOperands())
return nullptr;
- short RegClass = MCID.OpInfo[OpNum].RegClass;
- if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
+ short RegClass = MCID.operands()[OpNum].RegClass;
+ if (MCID.operands()[OpNum].isLookupPtrRegClass())
return TRI->getPointerRegClass(MF, RegClass);
// Instructions like INSERT_SUBREG do not have fixed register classes.
@@ -337,7 +337,7 @@ bool TargetInstrInfo::PredicateInstruction(
return false;
for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
- if (MCID.OpInfo[i].isPredicate()) {
+ if (MCID.operands()[i].isPredicate()) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
MO.setReg(Pred[j].getReg());
diff --git a/llvm/lib/CodeGen/TargetSchedule.cpp b/llvm/lib/CodeGen/TargetSchedule.cpp
index 4f7b9d89f17ba..dba84950f49d4 100644
--- a/llvm/lib/CodeGen/TargetSchedule.cpp
+++ b/llvm/lib/CodeGen/TargetSchedule.cpp
@@ -222,9 +222,9 @@ unsigned TargetSchedModel::computeOperandLatency(
// If DefIdx does not exist in the model (e.g. implicit defs), then return
// unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
- if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
- && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
- && SchedModel.isComplete()) {
+ if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() &&
+ !DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() &&
+ SchedModel.isComplete()) {
errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
<< *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
llvm_unreachable("incomplete machine model");
diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp
index 6e9aa1bf52163..6860c81ac7cb4 100644
--- a/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -6048,7 +6048,7 @@ bool AsmParser::parseMSInlineAsm(
InputDecls.push_back(OpDecl);
InputDeclsAddressOf.push_back(Operand.needAddressOf());
InputConstraints.push_back(Constraint.str());
- if (Desc.OpInfo[i - 1].isBranchTarget())
+ if (Desc.operands()[i - 1].isBranchTarget())
AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size(), 0,
Restricted);
else
diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp
index 5c78cfe4e7435..08a65065aa6e6 100644
--- a/llvm/lib/MC/MCParser/MasmParser.cpp
+++ b/llvm/lib/MC/MCParser/MasmParser.cpp
@@ -7458,7 +7458,7 @@ bool MasmParser::parseMSInlineAsm(
InputDecls.push_back(OpDecl);
InputDeclsAddressOf.push_back(Operand.needAddressOf());
InputConstraints.push_back(Constraint.str());
- if (Desc.OpInfo[i - 1].isBranchTarget())
+ if (Desc.operands()[i - 1].isBranchTarget())
AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size());
else
AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size());
diff --git a/llvm/lib/MCA/InstrBuilder.cpp b/llvm/lib/MCA/InstrBuilder.cpp
index 19277f5f06b7d..4eb381a2794bf 100644
--- a/llvm/lib/MCA/InstrBuilder.cpp
+++ b/llvm/lib/MCA/InstrBuilder.cpp
@@ -331,7 +331,7 @@ void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
if (!Op.isReg())
continue;
- if (MCDesc.OpInfo[CurrentDef].isOptionalDef()) {
+ if (MCDesc.operands()[CurrentDef].isOptionalDef()) {
OptionalDefIdx = CurrentDef++;
continue;
}
diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 13d389cec7a07..ea8e30269ece2 100644
--- a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -515,7 +515,7 @@ int AArch64A57FPLoadBalancing::scavengeRegister(Chain *G, Color C,
} while (I != ChainBegin);
// Make sure we allocate in-order, to get the cheapest registers first.
- unsigned RegClassID = ChainBegin->getDesc().OpInfo[0].RegClass;
+ unsigned RegClassID = ChainBegin->getDesc().operands()[0].RegClass;
auto Ord = RCI.getOrder(TRI->getRegClass(RegClassID));
for (auto Reg : Ord) {
if (!Units.available(Reg))
diff --git a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index dd84fa473f80b..6b5d665e627ae 100644
--- a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -336,8 +336,8 @@ DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
// operand for the accumulator (ZA) or implicit immediate zero which isn't
// encoded, manually insert operand.
for (unsigned i = 0; i < Desc.getNumOperands(); i++) {
- if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_REGISTER) {
- switch (Desc.OpInfo[i].RegClass) {
+ if (Desc.operands()[i].OperandType == MCOI::OPERAND_REGISTER) {
+ switch (Desc.operands()[i].RegClass) {
default:
break;
case AArch64::MPRRegClassID:
@@ -350,7 +350,7 @@ DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
MI.insert(MI.begin() + i, MCOperand::createReg(AArch64::ZT0));
break;
}
- } else if (Desc.OpInfo[i].OperandType ==
+ } else if (Desc.operands()[i].OperandType ==
AArch64::OPERAND_IMPLICIT_IMM_0) {
MI.insert(MI.begin() + i, MCOperand::createImm(0));
}
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index 6db431f92d4ff..8b61ebcfea0ee 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -411,7 +411,7 @@ class AArch64MCInstrAnalysis : public MCInstrAnalysis {
// condition code) and cbz (where it is a register).
const auto &Desc = Info->get(Inst.getOpcode());
for (unsigned i = 0, e = Inst.getNumOperands(); i != e; i++) {
- if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_PCREL) {
+ if (Desc.operands()[i].OperandType == MCOI::OPERAND_PCREL) {
int64_t Imm = Inst.getOperand(i).getImm();
if (Inst.getOpcode() == AArch64::ADRP)
Target = (Addr & -4096) + Imm * 4096;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 0eb07bbc5dda5..f6d672938b627 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -368,7 +368,7 @@ const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
unsigned OpIdx = Desc.getNumDefs() + OpNo;
if (OpIdx >= Desc.getNumOperands())
return nullptr;
- int RegClass = Desc.OpInfo[OpIdx].RegClass;
+ int RegClass = Desc.operands()[OpIdx].RegClass;
if (RegClass == -1)
return nullptr;
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index f0cb7dbfd8d73..2fc74564d633c 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -2114,7 +2114,7 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
}
APInt Literal(64, Val);
- uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
+ uint8_t OpTy = InstDesc.operands()[OpNum].OperandType;
if (Imm.IsFPImm) { // We got fp literal token
switch (OpTy) {
@@ -3373,7 +3373,7 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
case 4:
return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
case 2: {
- const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
+ const unsigned OperandType = Desc.operands()[OpIdx].OperandType;
if (OperandType == AMDGPU::OPERAND_REG_IMM_INT16 ||
OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT16 ||
OperandType == AMDGPU::OPERAND_REG_INLINE_AC_INT16)
@@ -3512,7 +3512,7 @@ bool AMDGPUAsmParser::validateConstantBusLimitations(
}
} else { // Expression or a literal
- if (Desc.OpInfo[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE)
+ if (Desc.operands()[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE)
continue; // special operand like VINTERP attr_chan
// An instruction may use only one literal.
@@ -3872,7 +3872,7 @@ bool AMDGPUAsmParser::validateMFMA(const MCInst &Inst,
return true;
const MCRegisterInfo *TRI = getContext().getRegisterInfo();
- if (TRI->getRegClass(Desc.OpInfo[0].RegClass).getSizeInBits() <= 128)
+ if (TRI->getRegClass(Desc.operands()[0].RegClass).getSizeInBits() <= 128)
return true;
if (TRI->regsOverlap(Src2Reg, DstReg)) {
@@ -8075,14 +8075,16 @@ void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands,
}
static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
+ return
// 1. This operand is input modifiers
- return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
+ Desc.operands()[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
// 2. This is not last operand
&& Desc.NumOperands > (OpNum + 1)
// 3. Next operand is register class
- && Desc.OpInfo[OpNum + 1].RegClass != -1
+ && Desc.operands()[OpNum + 1].RegClass != -1
// 4. Next register is not tied to any other operand
- && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
+ && Desc.getOperandConstraint(OpNum + 1,
+ MCOI::OperandConstraint::TIED_TO) == -1;
}
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
@@ -8751,7 +8753,7 @@ void AMDGPUAsmParser::cvtVOP3DPP(MCInst &Inst, const OperandVector &Operands,
} else if (Op.isReg()) {
Op.addRegOperands(Inst, 1);
} else if (Op.isImm() &&
- Desc.OpInfo[Inst.getNumOperands()].RegClass != -1) {
+ Desc.operands()[Inst.getNumOperands()].RegClass != -1) {
assert(!Op.IsImmKindLiteral() && "Cannot use literal with DPP");
Op.addImmOperands(Inst, 1);
} else if (Op.isImm()) {
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index 7659de6797e0e..c4e85210848ab 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -662,7 +662,8 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
} else {
for (unsigned i = 0; i < NSAArgs; ++i) {
const unsigned VAddrIdx = VAddr0Idx + 1 + i;
- auto VAddrRCID = MCII->get(MI.getOpcode()).OpInfo[VAddrIdx].RegClass;
+ auto VAddrRCID =
+ MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
MI.insert(MI.begin() + VAddrIdx,
createRegOperand(VAddrRCID, Bytes[i]));
}
@@ -955,7 +956,7 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
// Widen the register to the correct number of enabled channels.
unsigned NewVdata = AMDGPU::NoRegister;
if (DstSize != Info->VDataDwords) {
- auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;
+ auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;
// Get first subregister of VData
unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
@@ -978,7 +979,7 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;
- auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
+ auto AddrRCID = MCII->get(NewOpcode).operands()[VAddr0Idx].RegClass;
NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
&MRI.getRegClass(AddrRCID));
if (NewVAddr0 == AMDGPU::NoRegister)
@@ -1070,7 +1071,7 @@ DecodeStatus AMDGPUDisassembler::convertFMAanyK(MCInst &MI,
assert(DescNumOps == MI.getNumOperands());
for (unsigned I = 0; I < DescNumOps; ++I) {
auto &Op = MI.getOperand(I);
- auto OpType = Desc.OpInfo[I].OperandType;
+ auto OpType = Desc.operands()[I].OperandType;
bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 26aec15279ef0..b33e614a071c3 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -817,7 +817,7 @@ int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
int VDataRCID = -1;
if (VDataIdx != -1)
- VDataRCID = Desc.OpInfo[VDataIdx].RegClass;
+ VDataRCID = Desc.operands()[VDataIdx].RegClass;
if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
// There is no hazard if the instruction does not use vector regs
@@ -842,13 +842,13 @@ int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
if (TII->isMIMG(MI)) {
int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
assert(SRsrcIdx != -1 &&
- AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
+ AMDGPU::getRegBitWidth(Desc.operands()[SRsrcIdx].RegClass) == 256);
(void)SRsrcIdx;
}
if (TII->isFLAT(MI)) {
int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
- if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
+ if (AMDGPU::getRegBitWidth(Desc.operands()[DataIdx].RegClass) > 64)
return DataIdx;
}
@@ -2813,7 +2813,7 @@ bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) {
return true;
} else {
const MCInstrDesc &InstDesc = I.getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
if (!TII.isInlineConstant(Op, OpInfo))
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index ff51304ac02e4..e465267f2c207 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -671,7 +671,7 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
// Check if operand register class contains register used.
// Intention: print disassembler message when invalid code is decoded,
// for example sgpr register used in VReg or VISrc(VReg or imm) operand.
- int RCID = Desc.OpInfo[OpNo].RegClass;
+ int RCID = Desc.operands()[OpNo].RegClass;
if (RCID != -1) {
const MCRegisterClass RC = MRI.getRegClass(RCID);
auto Reg = mc2PseudoReg(Op.getReg());
@@ -681,7 +681,7 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
}
}
} else if (Op.isImm()) {
- const uint8_t OpTy = Desc.OpInfo[OpNo].OperandType;
+ const uint8_t OpTy = Desc.operands()[OpNo].OperandType;
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
@@ -758,7 +758,7 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
O << "0.0";
else {
const MCInstrDesc &Desc = MII.get(MI->getOpcode());
- int RCID = Desc.OpInfo[OpNo].RegClass;
+ int RCID = Desc.operands()[OpNo].RegClass;
unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID));
if (RCBits == 32)
printImmediate32(FloatToBits(Value), STI, O);
@@ -925,7 +925,7 @@ void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
AMDGPU::OpName::src0);
if (Src0Idx >= 0 &&
- Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID &&
+ Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID &&
!AMDGPU::isLegal64BitDPPControl(Imm)) {
O << " /* 64 bit dpp only supports row_newbcast */";
return;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index 2aa0572811b3c..8a9fea3c8d260 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -128,7 +128,7 @@ class AMDGPUMCInstrAnalysis : public MCInstrAnalysis {
bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
uint64_t &Target) const override {
if (Inst.getNumOperands() == 0 || !Inst.getOperand(0).isImm() ||
- Info->get(Inst.getOpcode()).OpInfo[0].OperandType !=
+ Info->get(Inst.getOpcode()).operands()[0].OperandType !=
MCOI::OPERAND_PCREL)
return false;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
index d3aee55c5f35c..f659f08de0276 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -384,7 +384,7 @@ void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
// Is this operand a literal immediate?
const MCOperand &Op = MI.getOperand(i);
- auto Enc = getLitEncoding(Op, Desc.OpInfo[i], STI);
+ auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
if (!Enc || *Enc != 255)
continue;
@@ -456,7 +456,7 @@ void SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
return;
} else {
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
- auto Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI);
+ auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
if (Enc && *Enc != 255) {
Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
return;
@@ -579,7 +579,7 @@ void SIMCCodeEmitter::getMachineOpValueCommon(
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
- if (auto Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI)) {
+ if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
Op = *Enc;
return;
}
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 2f2e45198c65b..dd9b70347094a 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -214,7 +214,7 @@ bool SIFoldOperands::updateOperand(FoldCandidate &Fold) const {
if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
// Only apply the following transformation if that operand requires
// a packed immediate.
- switch (TII->get(Opcode).OpInfo[OpNo].OperandType) {
+ switch (TII->get(Opcode).operands()[OpNo].OperandType) {
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
@@ -436,7 +436,7 @@ bool SIFoldOperands::tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
// scalar instruction
if (TII->isSALU(MI->getOpcode())) {
const MCInstrDesc &InstDesc = MI->getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
// Fine if the operand can be encoded as an inline constant
if (!OpToFold->isReg() && !TII->isInlineConstant(*OpToFold, OpInfo)) {
@@ -498,11 +498,10 @@ bool SIFoldOperands::tryToFoldACImm(
const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx,
SmallVectorImpl<FoldCandidate> &FoldList) const {
const MCInstrDesc &Desc = UseMI->getDesc();
- const MCOperandInfo *OpInfo = Desc.OpInfo;
- if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
+ if (UseOpIdx >= Desc.getNumOperands())
return false;
- uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
+ uint8_t OpTy = Desc.operands()[UseOpIdx].OperandType;
if ((OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) &&
(OpTy < AMDGPU::OPERAND_REG_INLINE_C_FIRST ||
@@ -859,9 +858,8 @@ void SIFoldOperands::foldOperand(
// Don't fold into target independent nodes. Target independent opcodes
// don't have defined register classes.
- if (UseDesc.isVariadic() ||
- UseOp.isImplicit() ||
- UseDesc.OpInfo[UseOpIdx].RegClass == -1)
+ if (UseDesc.isVariadic() || UseOp.isImplicit() ||
+ UseDesc.operands()[UseOpIdx].RegClass == -1)
return;
}
@@ -892,7 +890,7 @@ void SIFoldOperands::foldOperand(
const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
const TargetRegisterClass *FoldRC =
- TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
+ TRI->getRegClass(FoldDesc.operands()[0].RegClass);
// Split 64-bit constants into 32-bits for folding.
if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e1d55714b3716..3af17b6b5f1f5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -12114,7 +12114,7 @@ void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
// Prefer VGPRs over AGPRs in mAI instructions where possible.
// This saves a chain-copy of registers and better balance register
// use between vgpr and agpr as agpr tuples tend to be big.
- if (MI.getDesc().OpInfo) {
+ if (!MI.getDesc().operands().empty()) {
unsigned Opc = MI.getOpcode();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 43e72e8b327a4..4f7e7dad7b277 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3851,7 +3851,7 @@ static bool compareMachineOp(const MachineOperand &Op0,
bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
const MachineOperand &MO) const {
const MCInstrDesc &InstDesc = MI.getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
@@ -4169,9 +4169,9 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
return false;
}
- int RegClass = Desc.OpInfo[i].RegClass;
+ int RegClass = Desc.operands()[i].RegClass;
- switch (Desc.OpInfo[i].OperandType) {
+ switch (Desc.operands()[i].OperandType) {
case MCOI::OPERAND_REGISTER:
if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
ErrInfo = "Illegal immediate value for operand.";
@@ -4401,7 +4401,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
if (OpIdx == -1)
continue;
const MachineOperand &MO = MI.getOperand(OpIdx);
- if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
+ if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
if (MO.isReg()) {
SGPRUsed = MO.getReg();
if (!llvm::is_contained(SGPRsUsed, SGPRUsed)) {
@@ -4459,7 +4459,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
const MachineOperand &MO = MI.getOperand(OpIdx);
- if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
+ if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) {
if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
if (MO.getReg() != SGPRUsed)
++SGPRCount;
@@ -4502,8 +4502,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
const MachineOperand &Src1 = MI.getOperand(Src1Idx);
if (!Src0.isReg() && !Src1.isReg() &&
- !isInlineConstant(Src0, Desc.OpInfo[Src0Idx]) &&
- !isInlineConstant(Src1, Desc.OpInfo[Src1Idx]) &&
+ !isInlineConstant(Src0, Desc.operands()[Src0Idx]) &&
+ !isInlineConstant(Src1, Desc.operands()[Src1Idx]) &&
!Src0.isIdenticalTo(Src1)) {
ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
return false;
@@ -4704,11 +4704,12 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
((DstIdx >= 0 &&
- (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
- Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) ||
+ (Desc.operands()[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
+ Desc.operands()[DstIdx].RegClass ==
+ AMDGPU::VReg_64_Align2RegClassID)) ||
((Src0Idx >= 0 &&
- (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
- Desc.OpInfo[Src0Idx].RegClass ==
+ (Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
+ Desc.operands()[Src0Idx].RegClass ==
AMDGPU::VReg_64_Align2RegClassID)))) &&
!AMDGPU::isLegal64BitDPPControl(DC)) {
ErrInfo = "Invalid dpp_ctrl value: "
@@ -4927,7 +4928,7 @@ const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID,
const {
if (OpNum >= TID.getNumOperands())
return nullptr;
- auto RegClass = TID.OpInfo[OpNum].RegClass;
+ auto RegClass = TID.operands()[OpNum].RegClass;
bool IsAllocatable = false;
if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) {
// vdst and vdata should be both VGPR or AGPR, same for the DS instructions
@@ -4956,7 +4957,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
const MCInstrDesc &Desc = get(MI.getOpcode());
if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
- Desc.OpInfo[OpNo].RegClass == -1) {
+ Desc.operands()[OpNo].RegClass == -1) {
Register Reg = MI.getOperand(OpNo).getReg();
if (Reg.isVirtual())
@@ -4964,7 +4965,7 @@ const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
return RI.getPhysRegBaseClass(Reg);
}
- unsigned RCID = Desc.OpInfo[OpNo].RegClass;
+ unsigned RCID = Desc.operands()[OpNo].RegClass;
return adjustAllocatableRegClass(ST, RI, MRI, Desc, RCID, true);
}
@@ -4973,7 +4974,7 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
MachineBasicBlock *MBB = MI.getParent();
MachineOperand &MO = MI.getOperand(OpIdx);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
- unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
+ unsigned RCID = get(MI.getOpcode()).operands()[OpIdx].RegClass;
const TargetRegisterClass *RC = RI.getRegClass(RCID);
unsigned Size = RI.getRegSizeInBits(*RC);
unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32;
@@ -5099,7 +5100,7 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
const MCInstrDesc &InstDesc = MI.getDesc();
- const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
+ const MCOperandInfo &OpInfo = InstDesc.operands()[OpIdx];
const TargetRegisterClass *DefinedRC =
OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
if (!MO)
@@ -5122,14 +5123,15 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
if (Op.isReg()) {
RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
if (!SGPRsUsed.count(SGPR) &&
- usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
+ // FIXME: This can access off the end of the operands() array.
+ usesConstantBus(MRI, Op, InstDesc.operands().begin()[i])) {
if (--ConstantBusLimit <= 0)
return false;
SGPRsUsed.insert(SGPR);
}
- } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32 ||
+ } else if (InstDesc.operands()[i].OperandType == AMDGPU::OPERAND_KIMM32 ||
(AMDGPU::isSISrcOperand(InstDesc, i) &&
- !isInlineConstant(Op, InstDesc.OpInfo[i]))) {
+ !isInlineConstant(Op, InstDesc.operands()[i]))) {
if (!LiteralLimit--)
return false;
if (--ConstantBusLimit <= 0)
@@ -5236,7 +5238,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
// VOP2 src0 instructions support all operand types, so we don't need to check
// their legality. If src1 is already legal, we don't need to do anything.
- if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
+ if (isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src1))
return;
// Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
@@ -5267,7 +5269,7 @@ void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
// TODO: Other immediate-like operand kinds could be commuted if there was a
// MachineOperand::ChangeTo* for them.
if ((!Src1.isImm() && !Src1.isReg()) ||
- !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
+ !isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src0)) {
legalizeOpWithMove(MI, Src1Idx);
return;
}
@@ -5345,7 +5347,7 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
MachineOperand &MO = MI.getOperand(Idx);
if (!MO.isReg()) {
- if (isInlineConstant(MO, get(Opc).OpInfo[Idx]))
+ if (isInlineConstant(MO, get(Opc).operands()[Idx]))
continue;
if (LiteralLimit > 0 && ConstantBusLimit > 0) {
@@ -6016,7 +6018,7 @@ SIInstrInfo::legalizeOperands(MachineInstr &MI,
if (RsrcIdx != -1) {
// We have an MUBUF instruction
MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
- unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
+ unsigned RsrcRC = get(MI.getOpcode()).operands()[RsrcIdx].RegClass;
if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
RI.getRegClass(RsrcRC))) {
// The operands are legal.
@@ -7427,7 +7429,8 @@ Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
// Is this operand statically required to be an SGPR based on the operand
// constraints?
- const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
+ const TargetRegisterClass *OpRC =
+ RI.getRegClass(Desc.operands()[Idx].RegClass);
bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
if (IsRequiredSGPR)
return MO.getReg();
@@ -7621,7 +7624,7 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
bool HasLiteral = false;
for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
const MachineOperand &Op = MI.getOperand(I);
- const MCOperandInfo &OpInfo = Desc.OpInfo[I];
+ const MCOperandInfo &OpInfo = Desc.operands()[I];
if (!Op.isReg() && !isInlineConstant(Op, OpInfo)) {
HasLiteral = true;
break;
@@ -7895,7 +7898,7 @@ bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
if (Idx == -1) // e.g. s_memtime
return false;
- const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
+ const auto RCID = MI.getDesc().operands()[Idx].RegClass;
return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 6cbc02ab1dbc5..a0ca2fa8a983e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -842,23 +842,22 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
const MachineOperand &DefMO) const {
assert(UseMO.getParent() == &MI);
int OpIdx = MI.getOperandNo(&UseMO);
- if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands) {
+ if (OpIdx >= MI.getDesc().NumOperands)
return false;
- }
- return isInlineConstant(DefMO, MI.getDesc().OpInfo[OpIdx]);
+ return isInlineConstant(DefMO, MI.getDesc().operands()[OpIdx]);
}
/// \p returns true if the operand \p OpIdx in \p MI is a valid inline
/// immediate.
bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx) const {
const MachineOperand &MO = MI.getOperand(OpIdx);
- return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType);
+ return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType);
}
bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx,
const MachineOperand &MO) const {
- if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands)
+ if (OpIdx >= MI.getDesc().NumOperands)
return false;
if (MI.isCopy()) {
@@ -870,7 +869,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
return isInlineConstant(MO, OpType);
}
- return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType);
+ return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType);
}
bool isInlineConstant(const MachineOperand &MO) const {
@@ -920,7 +919,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
/// Return the size in bytes of the operand OpNo on the given
// instruction opcode.
unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const {
- const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo];
+ const MCOperandInfo &OpInfo = get(Opcode).operands()[OpNo];
if (OpInfo.RegClass == -1) {
// If this is an immediate operand, this must be a 32-bit literal.
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index fd6f3ad43dea2..c21ff06454da2 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1159,8 +1159,8 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
continue;
unsigned I = MI.getOperandNo(&Op);
- if (Desc.OpInfo[I].RegClass == -1 ||
- !TRI->isVSSuperClass(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
+ if (Desc.operands()[I].RegClass == -1 ||
+ !TRI->isVSSuperClass(TRI->getRegClass(Desc.operands()[I].RegClass)))
continue;
if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index bfa9d39d1cc5c..cf0f598bc2d01 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -539,7 +539,7 @@ ComponentProps::ComponentProps(const MCInstrDesc &OpDesc) {
auto OperandsNum = OpDesc.getNumOperands();
unsigned CompOprIdx;
for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) {
- if (OpDesc.OpInfo[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
+ if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) {
MandatoryLiteralIdx = CompOprIdx;
break;
}
@@ -2133,21 +2133,21 @@ bool isInlineValue(unsigned Reg) {
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
OpType <= AMDGPU::OPERAND_SRC_LAST;
}
bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
return OpType >= AMDGPU::OPERAND_KIMM_FIRST &&
OpType <= AMDGPU::OPERAND_KIMM_LAST;
}
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
switch (OpType) {
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
@@ -2176,7 +2176,7 @@ bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned OpType = Desc.OpInfo[OpNo].OperandType;
+ unsigned OpType = Desc.operands()[OpNo].OperandType;
return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}
@@ -2331,7 +2331,7 @@ unsigned getRegBitWidth(const MCRegisterClass &RC) {
unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
unsigned OpNo) {
assert(OpNo < Desc.NumOperands);
- unsigned RCID = Desc.OpInfo[OpNo].RegClass;
+ unsigned RCID = Desc.operands()[OpNo].RegClass;
return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index acec7d5825b48..d527199ce12db 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -1209,7 +1209,7 @@ inline unsigned getOperandSize(const MCOperandInfo &OpInfo) {
LLVM_READNONE
inline unsigned getOperandSize(const MCInstrDesc &Desc, unsigned OpNo) {
- return getOperandSize(Desc.OpInfo[OpNo]);
+ return getOperandSize(Desc.operands()[OpNo]);
}
/// Is this literal inlinable, and not one of the values intended for floating
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 0b345085a99f0..d5127b7517613 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -619,7 +619,8 @@ bool ARMBaseInstrInfo::PredicateInstruction(
// IT block. This affects how they are printed.
const MCInstrDesc &MCID = MI.getDesc();
if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
- assert(MCID.OpInfo[1].isOptionalDef() && "CPSR def isn't expected operand");
+ assert(MCID.operands()[1].isOptionalDef() &&
+ "CPSR def isn't expected operand");
assert((MI.getOperand(1).isDead() ||
MI.getOperand(1).getReg() != ARM::CPSR) &&
"if conversion tried to stop defining used CPSR");
@@ -2382,7 +2383,7 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
// Copy all the DefMI operands, excluding its (null) predicate.
const MCInstrDesc &DefDesc = DefMI->getDesc();
for (unsigned i = 1, e = DefDesc.getNumOperands();
- i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
+ i != e && !DefDesc.operands()[i].isPredicate(); ++i)
NewMI.add(DefMI->getOperand(i));
unsigned CondCode = MI.getOperand(3).getImm();
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index aa4b8fec6a9c6..47408e0634be9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -12289,7 +12289,7 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
// Any ARM instruction that sets the 's' bit should specify an optional
// "cc_out" operand in the last operand position.
- if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
+ if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) {
assert(!NewOpc && "Optional cc_out operand required");
return;
}
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index e08f64c3f7dd1..268f25bef89ca 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -7626,7 +7626,7 @@ bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
for (unsigned i = 0; i < MCID.NumOperands; ++i) {
- if (ARM::isVpred(MCID.OpInfo[i].OperandType))
+ if (ARM::isVpred(MCID.operands()[i].OperandType))
return i;
}
return -1;
@@ -7679,7 +7679,7 @@ bool ARMAsmParser::validateInstruction(MCInst &Inst,
// to keep instructions the same shape even though one cannot
// legally be predicated, e.g. vmul.f16 vs vmul.f32.
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
- if (MCID.OpInfo[i].isPredicate()) {
+ if (MCID.operands()[i].isPredicate()) {
if (Inst.getOperand(i).getImm() != ARMCC::AL)
return Error(Loc, "instruction is not predicable");
break;
@@ -10755,7 +10755,7 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
// Find the optional-def operand (cc_out).
unsigned OpNo;
for (OpNo = 0;
- !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
+ !MCID.operands()[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
++OpNo)
;
// If we're parsing Thumb1, reject it completely.
@@ -10833,7 +10833,7 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
}
for (unsigned I = 0; I < MCID.NumOperands; ++I)
- if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
+ if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
// rGPRRegClass excludes PC, and also excluded SP before ARMv8
const auto &Op = Inst.getOperand(I);
if (!Op.isReg()) {
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 6c1712625435c..fa696d8952e40 100644
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -887,9 +887,9 @@ void ARMDisassembler::AddThumb1SBit(MCInst &MI, bool InITBlock) const {
MCInst::iterator I = MI.begin();
for (unsigned i = 0; i < MCID.NumOperands; ++i, ++I) {
if (I == MI.end()) break;
- if (MCID.OpInfo[i].isOptionalDef() &&
- MCID.OpInfo[i].RegClass == ARM::CCRRegClassID) {
- if (i > 0 && MCID.OpInfo[i - 1].isPredicate())
+ if (MCID.operands()[i].isOptionalDef() &&
+ MCID.operands()[i].RegClass == ARM::CCRRegClassID) {
+ if (i > 0 && MCID.operands()[i - 1].isPredicate())
continue;
MI.insert(I, MCOperand::createReg(InITBlock ? 0 : ARM::CPSR));
return;
@@ -902,7 +902,7 @@ void ARMDisassembler::AddThumb1SBit(MCInst &MI, bool InITBlock) const {
bool ARMDisassembler::isVectorPredicable(const MCInst &MI) const {
const MCInstrDesc &MCID = MCII->get(MI.getOpcode());
for (unsigned i = 0; i < MCID.NumOperands; ++i) {
- if (ARM::isVpred(MCID.OpInfo[i].OperandType))
+ if (ARM::isVpred(MCID.operands()[i].OperandType))
return true;
}
return false;
@@ -981,7 +981,7 @@ ARMDisassembler::AddThumbPredicate(MCInst &MI) const {
MCInst::iterator CCI = MI.begin();
for (unsigned i = 0; i < MCID.NumOperands; ++i, ++CCI) {
- if (MCID.OpInfo[i].isPredicate() || CCI == MI.end())
+ if (MCID.operands()[i].isPredicate() || CCI == MI.end())
break;
}
@@ -999,7 +999,7 @@ ARMDisassembler::AddThumbPredicate(MCInst &MI) const {
MCInst::iterator VCCI = MI.begin();
unsigned VCCPos;
for (VCCPos = 0; VCCPos < MCID.NumOperands; ++VCCPos, ++VCCI) {
- if (ARM::isVpred(MCID.OpInfo[VCCPos].OperandType) || VCCI == MI.end())
+ if (ARM::isVpred(MCID.operands()[VCCPos].OperandType) || VCCI == MI.end())
break;
}
@@ -1013,7 +1013,7 @@ ARMDisassembler::AddThumbPredicate(MCInst &MI) const {
++VCCI;
VCCI = MI.insert(VCCI, MCOperand::createReg(0));
++VCCI;
- if (MCID.OpInfo[VCCPos].OperandType == ARM::OPERAND_VPRED_R) {
+ if (MCID.operands()[VCCPos].OperandType == ARM::OPERAND_VPRED_R) {
int TiedOp = MCID.getOperandConstraint(VCCPos + 3, MCOI::TIED_TO);
assert(TiedOp >= 0 &&
"Inactive register in vpred_r is not tied to an output!");
@@ -1046,7 +1046,7 @@ void ARMDisassembler::UpdateThumbVFPPredicate(
}
const MCInstrDesc &MCID = MCII->get(MI.getOpcode());
- const MCOperandInfo *OpInfo = MCID.OpInfo;
+ ArrayRef<MCOperandInfo> OpInfo = MCID.operands();
MCInst::iterator I = MI.begin();
unsigned short NumOps = MCID.NumOperands;
for (unsigned i = 0; i < NumOps; ++i, ++I) {
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index d0bf0198d4010..48ad7f3a2b28e 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -178,7 +178,7 @@ bool ARM_MC::isCPSRDefined(const MCInst &MI, const MCInstrInfo *MCII) {
for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
const MCOperand &MO = MI.getOperand(I);
if (MO.isReg() && MO.getReg() == ARM::CPSR &&
- Desc.OpInfo[I].isOptionalDef())
+ Desc.operands()[I].isOptionalDef())
return true;
}
return false;
@@ -422,7 +422,7 @@ class ARMMCInstrAnalysis : public MCInstrAnalysis {
// Find the PC-relative immediate operand in the instruction.
for (unsigned OpNum = 0; OpNum < Desc.getNumOperands(); ++OpNum) {
if (Inst.getOperand(OpNum).isImm() &&
- Desc.OpInfo[OpNum].OperandType == MCOI::OPERAND_PCREL) {
+ Desc.operands()[OpNum].OperandType == MCOI::OPERAND_PCREL) {
int64_t Imm = Inst.getOperand(OpNum).getImm();
Target = ARM_MC::evaluateBranchTarget(Desc, Addr, Imm);
return true;
@@ -578,7 +578,7 @@ std::optional<uint64_t> ARMMCInstrAnalysis::evaluateMemoryOperandAddress(
// Find the memory addressing operand in the instruction.
unsigned OpIndex = Desc.NumDefs;
while (OpIndex < Desc.getNumOperands() &&
- Desc.OpInfo[OpIndex].OperandType != MCOI::OPERAND_MEMORY)
+ Desc.operands()[OpIndex].OperandType != MCOI::OPERAND_MEMORY)
++OpIndex;
if (OpIndex == Desc.getNumOperands())
return std::nullopt;
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index d06f182b0d0e8..1b24c289061d7 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -776,11 +776,8 @@ ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
const MCInstrDesc &MCID = MI.getDesc();
- if (!MCID.OpInfo)
- return -1;
-
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
- if (ARM::isVpred(MCID.OpInfo[i].OperandType))
+ if (ARM::isVpred(MCID.operands()[i].OperandType))
return i;
return -1;
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 7acfb9b0d91e1..873953d28bb3c 100644
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -839,9 +839,9 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
// Transfer the rest of operands.
unsigned NumOps = MCID.getNumOperands();
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
- if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
+ if (i < NumOps && MCID.operands()[i].isOptionalDef())
continue;
- if (SkipPred && MCID.OpInfo[i].isPredicate())
+ if (SkipPred && MCID.operands()[i].isPredicate())
continue;
MIB.add(MI->getOperand(i));
}
@@ -875,7 +875,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
const MCInstrDesc &MCID = MI->getDesc();
for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
- if (MCID.OpInfo[i].isPredicate())
+ if (MCID.operands()[i].isPredicate())
continue;
const MachineOperand &MO = MI->getOperand(i);
if (MO.isReg()) {
@@ -884,8 +884,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
continue;
if (Entry.LowRegs1 && !isARMLowRegister(Reg))
return false;
- } else if (MO.isImm() &&
- !MCID.OpInfo[i].isPredicate()) {
+ } else if (MO.isImm() && !MCID.operands()[i].isPredicate()) {
if (((unsigned)MO.getImm()) > Limit)
return false;
}
@@ -946,7 +945,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
// Transfer the rest of operands.
unsigned NumOps = MCID.getNumOperands();
for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
- if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
+ if (i < NumOps && MCID.operands()[i].isOptionalDef())
continue;
if ((MCID.getOpcode() == ARM::t2RSBSri ||
MCID.getOpcode() == ARM::t2RSBri ||
@@ -956,7 +955,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
MCID.getOpcode() == ARM::t2UXTH) && i == 2)
// Skip the zero immediate operand, it's now implicit.
continue;
- bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
+ bool isPred = (i < NumOps && MCID.operands()[i].isPredicate());
if (SkipPred && isPred)
continue;
const MachineOperand &MO = MI->getOperand(i);
diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp
index d68e73ce0bb15..ffda703a24ade 100644
--- a/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp
+++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp
@@ -100,7 +100,7 @@ const char *AVRInstPrinter::getPrettyRegisterName(unsigned RegNum,
void AVRInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- const MCOperandInfo &MOI = this->MII.get(MI->getOpcode()).OpInfo[OpNo];
+ const MCOperandInfo &MOI = this->MII.get(MI->getOpcode()).operands()[OpNo];
if (MOI.RegClass == AVR::ZREGRegClassID) {
// Special case for the Z register, which sometimes doesn't have an operand
// in the MCInst.
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
index 18ff901d64414..37ee3dec2b38c 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
@@ -480,7 +480,7 @@ bool HexagonMCChecker::checkNewValues() {
MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, *ProducerInst);
const unsigned ProducerOpIndex = std::get<1>(Producer);
- if (Desc.OpInfo[ProducerOpIndex].RegClass ==
+ if (Desc.operands()[ProducerOpIndex].RegClass ==
Hexagon::DoubleRegsRegClassID) {
reportNote(ProducerInst->getLoc(),
"Double registers cannot be new-value producers");
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index 65c9b9cf7fd7b..fd82d1fb03156 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -773,7 +773,9 @@ HexagonMCCodeEmitter::getMachineOpValue(MCInst const &MI, MCOperand const &MO,
assert(!MO.isImm());
if (MO.isReg()) {
unsigned Reg = MO.getReg();
- switch (HexagonMCInstrInfo::getDesc(MCII, MI).OpInfo[OperandNumber].RegClass) {
+ switch (HexagonMCInstrInfo::getDesc(MCII, MI)
+ .operands()[OperandNumber]
+ .RegClass) {
case GeneralSubRegsRegClassID:
case GeneralDoubleLow8RegsRegClassID:
return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg);
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
index 494b0e6cbac6e..ef1ccea6add7a 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
@@ -762,7 +762,7 @@ bool HexagonMCInstrInfo::isPredRegister(MCInstrInfo const &MCII,
MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, Inst);
return Inst.getOperand(I).isReg() &&
- Desc.OpInfo[I].RegClass == Hexagon::PredRegsRegClassID;
+ Desc.operands()[I].RegClass == Hexagon::PredRegsRegClassID;
}
/// Return whether the insn can be packaged only with A and X-type insns.
@@ -932,7 +932,7 @@ HexagonMCInstrInfo::predicateInfo(MCInstrInfo const &MCII, MCInst const &MCI) {
return {0, 0, false};
MCInstrDesc const &Desc = getDesc(MCII, MCI);
for (auto I = Desc.getNumDefs(), N = Desc.getNumOperands(); I != N; ++I)
- if (Desc.OpInfo[I].RegClass == Hexagon::PredRegsRegClassID)
+ if (Desc.operands()[I].RegClass == Hexagon::PredRegsRegClassID)
return {MCI.getOperand(I).getReg(), I, isPredicatedTrue(MCII, MCI)};
return {0, 0, false};
}
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index 42b088a6fd00d..aa7e8846406dd 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -514,7 +514,7 @@ LanaiInstrInfo::optimizeSelect(MachineInstr &MI,
// Copy all the DefMI operands, excluding its (null) predicate.
const MCInstrDesc &DefDesc = DefMI->getDesc();
for (unsigned i = 1, e = DefDesc.getNumOperands();
- i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
+ i != e && !DefDesc.operands()[i].isPredicate(); ++i)
NewMI.add(DefMI->getOperand(i));
unsigned CondCode = MI.getOperand(3).getImm();
diff --git a/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp b/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp
index c434508698323..97d33ea2a0abf 100644
--- a/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp
+++ b/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp
@@ -102,7 +102,7 @@ class LanaiMCInstrAnalysis : public MCInstrAnalysis {
!isCall(Inst))
return false;
- if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType ==
+ if (Info->get(Inst.getOpcode()).operands()[0].OperandType ==
MCOI::OPERAND_PCREL) {
int64_t Imm = Inst.getOperand(0).getImm();
Target = Addr + Size + Imm;
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index eab3653041691..45cbddd03d928 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -1834,7 +1834,7 @@ static bool needsExpandMemInst(MCInst &Inst, const MCInstrDesc &MCID) {
if (NumOp != 3 && NumOp != 4)
return false;
- const MCOperandInfo &OpInfo = MCID.OpInfo[NumOp - 1];
+ const MCOperandInfo &OpInfo = MCID.operands()[NumOp - 1];
if (OpInfo.OperandType != MCOI::OPERAND_MEMORY &&
OpInfo.OperandType != MCOI::OPERAND_UNKNOWN &&
OpInfo.OperandType != MipsII::OPERAND_MEM_SIMM9)
@@ -2148,7 +2148,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
// Check the offset of memory operand, if it is a symbol
// reference or immediate we may have to expand instructions.
if (needsExpandMemInst(Inst, MCID)) {
- switch (MCID.OpInfo[MCID.getNumOperands() - 1].OperandType) {
+ switch (MCID.operands()[MCID.getNumOperands() - 1].OperandType) {
case MipsII::OPERAND_MEM_SIMM9:
expandMem9Inst(Inst, IDLoc, Out, STI, MCID.mayLoad());
break;
@@ -2164,7 +2164,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
if (MCID.mayLoad() && Opcode != Mips::LWP_MM) {
// Try to create 16-bit GP relative load instruction.
for (unsigned i = 0; i < MCID.getNumOperands(); i++) {
- const MCOperandInfo &OpInfo = MCID.OpInfo[i];
+ const MCOperandInfo &OpInfo = MCID.operands()[i];
if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) ||
(OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) {
MCOperand &Op = Inst.getOperand(i);
@@ -3684,7 +3684,7 @@ void MipsAsmParser::expandMem16Inst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
unsigned TmpReg = DstReg;
const MCInstrDesc &Desc = MII.get(OpCode);
- int16_t DstRegClass = Desc.OpInfo[StartOp].RegClass;
+ int16_t DstRegClass = Desc.operands()[StartOp].RegClass;
unsigned DstRegClassID =
getContext().getRegisterInfo()->getRegClass(DstRegClass).getID();
bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) ||
@@ -3811,7 +3811,7 @@ void MipsAsmParser::expandMem9Inst(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
unsigned TmpReg = DstReg;
const MCInstrDesc &Desc = MII.get(OpCode);
- int16_t DstRegClass = Desc.OpInfo[StartOp].RegClass;
+ int16_t DstRegClass = Desc.operands()[StartOp].RegClass;
unsigned DstRegClassID =
getContext().getRegisterInfo()->getRegClass(DstRegClass).getID();
bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) ||
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 40c807082fdc7..273dcdb0b429f 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -143,7 +143,7 @@ class MipsMCInstrAnalysis : public MCInstrAnalysis {
unsigned NumOps = Inst.getNumOperands();
if (NumOps == 0)
return false;
- switch (Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType) {
+ switch (Info->get(Inst.getOpcode()).operands()[NumOps - 1].OperandType) {
case MCOI::OPERAND_UNKNOWN:
case MCOI::OPERAND_IMMEDIATE: {
// j, jal, jalx, jals
diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 10e352923c1a9..0ea50e6b509b4 100644
--- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -385,7 +385,7 @@ class PPCMCInstrAnalysis : public MCInstrAnalysis {
uint64_t &Target) const override {
unsigned NumOps = Inst.getNumOperands();
if (NumOps == 0 ||
- Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType !=
+ Info->get(Inst.getOpcode()).operands()[NumOps - 1].OperandType !=
MCOI::OPERAND_PCREL)
return false;
Target = Addr + Inst.getOperand(NumOps - 1).getImm() * Size;
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 77b507d113f8a..d26601906150c 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2092,7 +2092,7 @@ bool PPCInstrInfo::onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg");
- const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx];
+ const MCOperandInfo *UseInfo = &UseMCID.operands()[UseIdx];
// We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0
// register (which might also be specified as a pointer class kind).
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index f1ef630b9c8e7..7c95f3ca2b4cf 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -794,7 +794,7 @@ class PPCInstrInfo : public PPCGenInstrInfo {
/// operands).
static unsigned getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg,
unsigned OpNo) {
- int16_t regClass = Desc.OpInfo[OpNo].RegClass;
+ int16_t regClass = Desc.operands()[OpNo].RegClass;
switch (regClass) {
// We store F0-F31, VF0-VF31 in MCOperand and it should be F0-F31,
// VSX32-VSX63 during encoding/disassembling
diff --git a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
index 790c3edd537f3..6b99b283f49fb 100644
--- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp
@@ -85,8 +85,8 @@ void SPIRVInstPrinter::printInst(const MCInst *MI, uint64_t Address,
const unsigned NumFixedOps = MCDesc.getNumOperands();
const unsigned LastFixedIndex = NumFixedOps - 1;
const int FirstVariableIndex = NumFixedOps;
- if (NumFixedOps > 0 &&
- MCDesc.OpInfo[LastFixedIndex].OperandType == MCOI::OPERAND_UNKNOWN) {
+ if (NumFixedOps > 0 && MCDesc.operands()[LastFixedIndex].OperandType ==
+ MCOI::OPERAND_UNKNOWN) {
// For instructions where a custom type (not reg or immediate) comes as
// the last operand before the variable_ops. This is usually a StringImm
// operand, but there are a few other cases.
diff --git a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp
index f726f42c9bcb5..cd95ca63d4075 100644
--- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp
+++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp
@@ -65,11 +65,11 @@ static bool hasType(const MCInst &MI, const MCInstrInfo &MII) {
// If we define an output, and have at least one other argument.
if (MCDesc.getNumDefs() == 1 && MCDesc.getNumOperands() >= 2) {
// Check if we define an ID, and take a type as operand 1.
- auto DefOpInfo = MCDesc.opInfo_begin();
- auto FirstArgOpInfo = MCDesc.opInfo_begin() + 1;
- return (DefOpInfo->RegClass == SPIRV::IDRegClassID ||
- DefOpInfo->RegClass == SPIRV::ANYIDRegClassID) &&
- FirstArgOpInfo->RegClass == SPIRV::TYPERegClassID;
+ auto &DefOpInfo = MCDesc.operands()[0];
+ auto &FirstArgOpInfo = MCDesc.operands()[1];
+ return (DefOpInfo.RegClass == SPIRV::IDRegClassID ||
+ DefOpInfo.RegClass == SPIRV::ANYIDRegClassID) &&
+ FirstArgOpInfo.RegClass == SPIRV::TYPERegClassID;
}
return false;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index cd1861c0f3022..1888f63e4467a 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1204,7 +1204,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// to FP conversion.
const MCInstrDesc &MCID = MI.getDesc();
for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
- const MCOperandInfo &MCOI = MCID.OpInfo[I];
+ const MCOperandInfo &MCOI = MCID.operands()[I];
if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
continue;
const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
@@ -1999,7 +1999,7 @@ bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI,
if (I >= MCID.getNumOperands())
break;
const MachineOperand &Op = MI.getOperand(I);
- const MCOperandInfo &MCOI = MCID.OpInfo[I];
+ const MCOperandInfo &MCOI = MCID.operands()[I];
// Addressing modes have register and immediate operands. Op should be a
// register (or frame index) operand if MCOI.RegClass contains a valid
// register class, or an immediate otherwise.
diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
index 5388536141975..b323b265b562b 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp
@@ -331,7 +331,7 @@ bool WebAssemblyAsmTypeCheck::typeCheck(SMLoc ErrorLoc, const MCInst &Inst,
const auto &II = MII.get(RegOpc);
// First pop all the uses off the stack and check them.
for (unsigned I = II.getNumOperands(); I > II.getNumDefs(); I--) {
- const auto &Op = II.OpInfo[I - 1];
+ const auto &Op = II.operands()[I - 1];
if (Op.OperandType == MCOI::OPERAND_REGISTER) {
auto VT = WebAssembly::regClassToValType(Op.RegClass);
if (popType(ErrorLoc, VT))
@@ -340,7 +340,7 @@ bool WebAssemblyAsmTypeCheck::typeCheck(SMLoc ErrorLoc, const MCInst &Inst,
}
// Now push all the defs onto the stack.
for (unsigned I = 0; I < II.getNumDefs(); I++) {
- const auto &Op = II.OpInfo[I];
+ const auto &Op = II.operands()[I];
assert(Op.OperandType == MCOI::OPERAND_REGISTER && "Register expected");
auto VT = WebAssembly::regClassToValType(Op.RegClass);
Stack.push_back(VT);
diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp
index ff2fa13d9da52..b925519e61627 100644
--- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp
+++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp
@@ -240,7 +240,7 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, uint64_t Address,
// See if this operand denotes a basic block target.
if (I < NumFixedOperands) {
// A non-variable_ops operand, check its type.
- if (Desc.OpInfo[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK)
+ if (Desc.operands()[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK)
continue;
} else {
// A variable_ops operand, which currently can be immediates (used in
diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
index 6e494b9430f71..cd692f4dda339 100644
--- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
+++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -92,7 +92,7 @@ void WebAssemblyMCCodeEmitter::encodeInstruction(
} else if (MO.isImm()) {
if (I < Desc.getNumOperands()) {
- const MCOperandInfo &Info = Desc.OpInfo[I];
+ const MCOperandInfo &Info = Desc.operands()[I];
LLVM_DEBUG(dbgs() << "Encoding immediate: type="
<< int(Info.OperandType) << "\n");
switch (Info.OperandType) {
@@ -134,7 +134,7 @@ void WebAssemblyMCCodeEmitter::encodeInstruction(
uint64_t D = MO.getDFPImm();
support::endian::write<uint64_t>(OS, D, support::little);
} else if (MO.isExpr()) {
- const MCOperandInfo &Info = Desc.OpInfo[I];
+ const MCOperandInfo &Info = Desc.operands()[I];
llvm::MCFixupKind FixupKind;
size_t PaddedSize = 5;
switch (Info.OperandType) {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
index e8b3542df12f4..85ece58f98b35 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
@@ -194,7 +194,7 @@ void WebAssemblyMCInstLower::lower(const MachineInstr *MI,
case MachineOperand::MO_Immediate: {
unsigned DescIndex = I - NumVariadicDefs;
if (DescIndex < Desc.NumOperands) {
- const MCOperandInfo &Info = Desc.OpInfo[DescIndex];
+ const MCOperandInfo &Info = Desc.operands()[DescIndex];
if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) {
SmallVector<wasm::ValType, 4> Returns;
SmallVector<wasm::ValType, 4> Params;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
index 89ae45722e429..a453e7388e274 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
@@ -62,7 +62,7 @@ static void rewriteP2Align(MachineInstr &MI, unsigned OperandNo) {
assert((*MI.memoperands_begin())->getSize() ==
(UINT64_C(1) << WebAssembly::GetDefaultP2Align(MI.getOpcode())) &&
"Default p2align value should be natural");
- assert(MI.getDesc().OpInfo[OperandNo].OperandType ==
+ assert(MI.getDesc().operands()[OperandNo].OperandType ==
WebAssembly::OPERAND_P2ALIGN &&
"Load and store instructions should have a p2align operand");
uint64_t P2Align = Log2((*MI.memoperands_begin())->getAlign());
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 63c6f9c11d9b0..5c045d3d5d973 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -630,7 +630,8 @@ std::vector<std::pair<uint64_t, uint64_t>> X86MCInstrAnalysis::findPltEntries(
bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
uint64_t Size, uint64_t &Target) const {
if (Inst.getNumOperands() == 0 ||
- Info->get(Inst.getOpcode()).OpInfo[0].OperandType != MCOI::OPERAND_PCREL)
+ Info->get(Inst.getOpcode()).operands()[0].OperandType !=
+ MCOI::OPERAND_PCREL)
return false;
Target = Addr + Size + Inst.getOperand(0).getImm();
return true;
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index b5887b0f9db94..6f89b2e79c45c 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -2134,7 +2134,7 @@ static void addConstantComments(const MachineInstr *MI,
const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 64> Mask;
DecodePSHUFBMask(C, Width, Mask);
if (!Mask.empty())
@@ -2212,7 +2212,7 @@ static void addConstantComments(const MachineInstr *MI,
const MachineOperand &MaskOp = MI->getOperand(MaskIdx);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 16> Mask;
DecodeVPERMILPMask(C, ElSize, Width, Mask);
if (!Mask.empty())
@@ -2241,7 +2241,7 @@ static void addConstantComments(const MachineInstr *MI,
const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 16> Mask;
DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask);
if (!Mask.empty())
@@ -2256,7 +2256,7 @@ static void addConstantComments(const MachineInstr *MI,
const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp);
if (auto *C = getConstantFromPool(*MI, MaskOp)) {
- unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]);
+ unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]);
SmallVector<int, 16> Mask;
DecodeVPPERMMask(C, Width, Mask);
if (!Mask.empty())
diff --git a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp
index a1c3794fd6088..eb732d30e5aa8 100644
--- a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp
+++ b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp
@@ -111,7 +111,7 @@ Instruction::create(const MCInstrInfo &InstrInfo,
SmallVector<Operand, 8> Operands;
SmallVector<Variable, 4> Variables;
for (; OpIndex < Description->getNumOperands(); ++OpIndex) {
- const auto &OpInfo = Description->opInfo_begin()[OpIndex];
+ const auto &OpInfo = Description->operands()[OpIndex];
Operand Operand;
Operand.Index = OpIndex;
Operand.IsDef = (OpIndex < Description->getNumDefs());
More information about the llvm-commits
mailing list