[llvm] [AMDGPU][NFC] Remove _DEFERRED operands. (PR #139123)
via llvm-commits
llvm-commits at lists.llvm.org
Thu May 8 11:27:50 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Ivan Kosarev (kosarev)
Changes:
All immediate operands are decoded as deferred now, so the dedicated _DEFERRED operand types are redundant and can be removed.
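For context: the _DEFERRED operand types marked FMAAK/FMAMK-style source operands whose value comes from the single mandatory trailing literal dword, so their decoding had to wait until that literal was read; the old convertFMAanyK patched such placeholder operands afterwards. With literals decoded eagerly in decodeImmOperands (see the disassembler changes below), that placeholder-and-patch step becomes unnecessary. A minimal standalone sketch of the shared-literal idea, as I read it from the patch; the types and names here are illustrative, not the actual LLVM API:

```cpp
// Illustration only (not LLVM code): a FMAMK-style instruction carries one
// mandatory trailing 32-bit literal (the K operand). A source field encoded
// as LITERAL_CONST reuses that same dword, so it can be resolved eagerly.
#include <cstdint>
#include <iostream>
#include <vector>

constexpr unsigned LITERAL_CONST = 255; // src field meaning "use trailing literal"

struct Operand { bool IsImm; uint64_t Val; };

// Eager decode: no deferred placeholder, no later patch-up pass.
std::vector<Operand> decode(const std::vector<unsigned> &SrcFields,
                            uint32_t TrailingLiteral) {
  std::vector<Operand> Ops;
  for (unsigned Field : SrcFields) {
    if (Field == LITERAL_CONST)
      Ops.push_back({true, TrailingLiteral}); // resolved immediately
    else
      Ops.push_back({false, Field}); // register or inline constant
  }
  Ops.push_back({true, TrailingLiteral}); // the mandatory K operand itself
  return Ops;
}

int main() {
  // src0 references the same literal dword as K (0x42c80000 == 100.0f).
  for (const Operand &Op : decode({LITERAL_CONST, 2}, 0x42c80000))
    std::cout << (Op.IsImm ? "imm 0x" : "reg ") << std::hex << Op.Val << '\n';
}
```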
---
Patch is 31.65 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/139123.diff
15 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp (+5-17)
- (modified) llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp (+7-55)
- (modified) llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h (+1-1)
- (modified) llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp (-3)
- (modified) llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp (-3)
- (modified) llvm/lib/Target/AMDGPU/SIDefines.h (-3)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (-4)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.td (+2-4)
- (modified) llvm/lib/Target/AMDGPU/SIRegisterInfo.td (+1-35)
- (modified) llvm/lib/Target/AMDGPU/SOPInstructions.td (+2-2)
- (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp (-2)
- (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h (-3)
- (modified) llvm/lib/Target/AMDGPU/VOP1Instructions.td (-2)
- (modified) llvm/lib/Target/AMDGPU/VOP2Instructions.td (+17-24)
- (modified) llvm/lib/Target/AMDGPU/VOPDInstructions.td (+10-23)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index b50a2cf1becf7..28370b8670f05 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1956,7 +1956,6 @@ static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -1975,14 +1974,12 @@ static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
return &APFloat::IEEEdouble();
case AMDGPU::OPERAND_REG_IMM_FP16:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_KIMM16:
return &APFloat::IEEEhalf();
case AMDGPU::OPERAND_REG_IMM_BF16:
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
@@ -2304,7 +2301,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
llvm_unreachable("fp literal in 64-bit integer instruction.");
case AMDGPU::OPERAND_REG_IMM_BF16:
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
@@ -2321,14 +2317,12 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_IMM_FP16:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
@@ -2369,7 +2363,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -2425,7 +2418,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_IMM_FP16:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
if (isSafeTruncation(Val, 16) &&
AMDGPU::isInlinableLiteralFP16(static_cast<int16_t>(Val),
AsmParser->hasInv2PiInlineImm())) {
@@ -2439,7 +2431,6 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
return;
case AMDGPU::OPERAND_REG_IMM_BF16:
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_BF16:
if (isSafeTruncation(Val, 16) &&
AMDGPU::isInlinableLiteralBF16(static_cast<int16_t>(Val),
@@ -3615,13 +3606,11 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
return AMDGPU::isInlinableLiteralV2BF16(Val);
if (OperandType == AMDGPU::OPERAND_REG_IMM_FP16 ||
- OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP16 ||
- OperandType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED)
+ OperandType == AMDGPU::OPERAND_REG_INLINE_C_FP16)
return AMDGPU::isInlinableLiteralFP16(Val, hasInv2PiInlineImm());
if (OperandType == AMDGPU::OPERAND_REG_IMM_BF16 ||
- OperandType == AMDGPU::OPERAND_REG_INLINE_C_BF16 ||
- OperandType == AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED)
+ OperandType == AMDGPU::OPERAND_REG_INLINE_C_BF16)
return AMDGPU::isInlinableLiteralBF16(Val, hasInv2PiInlineImm());
llvm_unreachable("invalid operand type");
@@ -3671,15 +3660,14 @@ static OperandIndices getSrcOperandIndices(unsigned Opcode,
AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::imm) : -1;
if (isVOPD(Opcode)) {
- int16_t ImmDeferredIdx =
- AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::immDeferred)
- : -1;
+ int16_t ImmXIdx =
+ AddMandatoryLiterals ? getNamedOperandIdx(Opcode, OpName::immX) : -1;
return {getNamedOperandIdx(Opcode, OpName::src0X),
getNamedOperandIdx(Opcode, OpName::vsrc1X),
getNamedOperandIdx(Opcode, OpName::src0Y),
getNamedOperandIdx(Opcode, OpName::vsrc1Y),
- ImmDeferredIdx,
+ ImmXIdx,
ImmIdx};
}
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index d2f18fefd9866..ca0093d1f049c 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -238,13 +238,6 @@ static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
}
-template <unsigned OpWidth>
-static DecodeStatus decodeSrcRegOrImmDeferred9(MCInst &Inst, unsigned Imm,
- uint64_t /* Addr */,
- const MCDisassembler *Decoder) {
- return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
-}
-
// Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
// when RegisterClass is used as an operand. Most often used for destination
// operands.
@@ -324,22 +317,6 @@ static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
}
-template <unsigned OpWidth>
-static DecodeStatus
-decodeOperand_VSrcT16_Lo128_Deferred(MCInst &Inst, unsigned Imm,
- uint64_t /*Addr*/,
- const MCDisassembler *Decoder) {
- const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
- assert(isUInt<9>(Imm) && "9-bit encoding expected");
-
- if (Imm & AMDGPU::EncValues::IS_VGPR) {
- bool IsHi = Imm & (1 << 7);
- unsigned RegIdx = Imm & 0x7f;
- return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
- }
- return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
-}
-
template <unsigned OpWidth>
static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
uint64_t /*Addr*/,
@@ -559,31 +536,21 @@ void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
}
if (Imm == AMDGPU::EncValues::LITERAL_CONST) {
- switch (OpDesc.OperandType) {
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
- Op = MCOperand::createImm(AMDGPU::EncValues::LITERAL_CONST);
- continue;
- default:
- Op = decodeLiteralConstant(OpDesc.OperandType ==
- AMDGPU::OPERAND_REG_IMM_FP64);
- continue;
- }
+ Op = decodeLiteralConstant(OpDesc.OperandType ==
+ AMDGPU::OPERAND_REG_IMM_FP64);
+ continue;
}
if (AMDGPU::EncValues::INLINE_FLOATING_C_MIN <= Imm &&
Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX) {
switch (OpDesc.OperandType) {
case AMDGPU::OPERAND_REG_IMM_BF16:
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
Imm = getInlineImmValBF16(Imm);
break;
case AMDGPU::OPERAND_REG_IMM_FP16:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
@@ -894,11 +861,9 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
}
- int ImmLitIdx =
- AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
- if (ImmLitIdx != -1 && !IsSOPK)
- convertFMAanyK(MI, ImmLitIdx);
+ if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm) && !IsSOPK)
+ convertFMAanyK(MI);
// Some VOPC instructions, e.g., v_cmpx_f_f64, use VOP3 encoding and
// have EXEC as implicit destination. Issue a warning if encoding for
@@ -1380,22 +1345,9 @@ void AMDGPUDisassembler::convertVOPC64DPPInst(MCInst &MI) const {
}
}
-void AMDGPUDisassembler::convertFMAanyK(MCInst &MI, int ImmLitIdx) const {
+void AMDGPUDisassembler::convertFMAanyK(MCInst &MI) const {
assert(HasLiteral && "Should have decoded a literal");
- const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
- unsigned DescNumOps = Desc.getNumOperands();
- insertNamedMCOperand(MI, MCOperand::createImm(Literal),
- AMDGPU::OpName::immDeferred);
- assert(DescNumOps == MI.getNumOperands());
- for (unsigned I = 0; I < DescNumOps; ++I) {
- auto &Op = MI.getOperand(I);
- auto OpType = Desc.operands()[I].OperandType;
- bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
- OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
- if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
- IsDeferredOp)
- Op.setImm(Literal);
- }
+ insertNamedMCOperand(MI, MCOperand::createImm(Literal), AMDGPU::OpName::immX);
}
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
index 3ca7c3e1fd682..a82dee430e01d 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -174,7 +174,7 @@ class AMDGPUDisassembler : public MCDisassembler {
void convertEXPInst(MCInst &MI) const;
void convertVINTERPInst(MCInst &MI) const;
- void convertFMAanyK(MCInst &MI, int ImmLitIdx) const;
+ void convertFMAanyK(MCInst &MI) const;
void convertSDWAInst(MCInst &MI) const;
void convertMAIInst(MCInst &MI) const;
void convertDPP8Inst(MCInst &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index 210b0dc18ffc4..a56bca514aff3 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -715,7 +715,6 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -741,12 +740,10 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
break;
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_IMM_FP16:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
printImmediateF16(Op.getImm(), STI, O);
break;
case AMDGPU::OPERAND_REG_INLINE_C_BF16:
case AMDGPU::OPERAND_REG_IMM_BF16:
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
printImmediateBF16(Op.getImm(), STI, O);
break;
case AMDGPU::OPERAND_REG_IMM_V2INT16:
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
index 8997c1049a90a..34fe093a931db 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -273,7 +273,6 @@ AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
switch (OpInfo.OperandType) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
@@ -295,14 +294,12 @@ AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);
case AMDGPU::OPERAND_REG_IMM_FP16:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
// FIXME Is this correct? What do inline immediates do on SI for f16 src
// which does not have f16 support?
return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
case AMDGPU::OPERAND_REG_IMM_BF16:
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_BF16:
// We don't actually need to check Inv2Pi here because BF16 instructions can
// only be emitted for targets that already support the feature.
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 7649a04a3aeba..0f603a43fd626 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -204,9 +204,6 @@ enum OperandType : unsigned {
OPERAND_REG_IMM_FP64,
OPERAND_REG_IMM_BF16,
OPERAND_REG_IMM_FP16,
- OPERAND_REG_IMM_BF16_DEFERRED,
- OPERAND_REG_IMM_FP16_DEFERRED,
- OPERAND_REG_IMM_FP32_DEFERRED,
OPERAND_REG_IMM_V2BF16,
OPERAND_REG_IMM_V2FP16,
OPERAND_REG_IMM_V2INT16,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 4a90dace47fb2..85276bd24bcf4 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4376,7 +4376,6 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
switch (OperandType) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
@@ -4416,7 +4415,6 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
return AMDGPU::isInlinableLiteralV2BF16(Imm);
case AMDGPU::OPERAND_REG_IMM_FP16:
- case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
if (isInt<16>(Imm) || isUInt<16>(Imm)) {
// A few special case instructions have 16-bit operands on subtargets
@@ -4431,7 +4429,6 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
return false;
}
case AMDGPU::OPERAND_REG_IMM_BF16:
- case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_BF16: {
if (isInt<16>(Imm) || isUInt<16>(Imm)) {
int16_t Trunc = static_cast<int16_t>(Imm);
@@ -4842,7 +4839,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
break;
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
- case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
break;
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 3710a54a828ce..79667e5ff9285 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2701,13 +2701,11 @@ class VOPProfile <list<ValueType> _ArgVT, bit _EnableClamp = 0> {
HasSDWAOMod, Src0ModSDWA, Src1ModSDWA,
DstVT>.ret;
field dag InsVOPDX = (ins Src0RC32:$src0X, Src1RC32:$vsrc1X);
- // It is a slight misnomer to use the deferred f32 operand type for non-float
+ // It is a slight misnomer to use the f32 operand type for non-float
// operands, but this operand type will only be used if the other dual
// component is FMAAK or FMAMK
- field dag InsVOPDXDeferred = (ins !if(!eq(Src0VT.Size, 32), VSrc_f32_Deferred, VSrc_f16_Deferred):$src0X, VGPR_32:$vsrc1X);
+ field dag InsVOPDX_immX = (ins !if(!eq(Src0VT.Size, 32), VSrc_f32, VSrc_f16):$src0X, VGPR_32:$vsrc1X);
field dag InsVOPDY = (ins Src0RC32:$src0Y, Src1RC32:$vsrc1Y);
- field dag InsVOPDYDeferred = (ins !if(!eq(Src1VT.Size, 32), VSrc_f32_Deferred, VSrc_f16_Deferred):$src0Y, VGPR_32:$vsrc1Y);
-
field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret;
field string AsmDPP = !if(HasExtDPP,
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index dc08b7d5a8e69..182128cb174bd 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1100,7 +1100,7 @@ class RegOrImmOperand <RegisterClass RegClass, string OperandTypeName>
: RegisterOperand<RegClass> {
let OperandNamespace = "AMDGPU";
let OperandType = OperandTypeName;
- let ParserMatchClass = RegImmMatcher<!subst("_Deferred", "", NAME)>;
+ let ParserMatchClass = RegImmMatcher<NAME>;
}
//===----------------------------------------------------------------------===//
@@ -1128,19 +1128,6 @@ def SSrc_b64 : SrcRegOrImm9 <SReg_64, "OPERAND_REG_IMM_INT64">;
def SSrcOrLds_b32 : SrcRegOrImm9 <SRegOrLds_32, "OPERAND_REG_IMM_INT32">;
-//===----------------------------------------------------------------------===//
-// SSrc_32_Deferred Operands with an SGPR or a 32-bit immediate for use with
-// FMAMK/FMAAK
-//===----------------------------------------------------------------------===//
-
-class SrcRegOrImmDeferred9<RegisterClass regClass, string operandType>
- : RegOrImmOperand<regClass, operandType> {
- string DecoderMethodName = "decodeSrcRegOrImmDeferred9";
- let DecoderMethod = DecoderMethodName # "<" # regClass.Size # ">";
-}
-
-def SSrc_f32_Deferred : SrcRegOrImmDeferred9<SReg_32, "OPERAND_REG_IMM_FP32_DEFERRED">;
-
//===----------------------------------------------------------------------===//
// SCSrc_* Operands with an SGPR or a inline constant
//===----------------------------------------------------------------------===//
@@ -1187,27 +1174,6 @@ def VSrc_f64 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_FP64"> {
def VSrc_v2b32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2INT32">;
def VSrc_v2f32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2FP32">;
-//===----------------------------------------------------------------------===//
-// VSrc_*_Deferred Operands with an SGPR, VGPR or a 32-bit immediate for use
-// with FMAMK/FMAAK
-//===----------------------------------------------------------------------===//
-
-def VSrc_bf16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPERAND_REG_IMM_BF16_DEFERRED">;
-def VSrc_f16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPERAND_REG_IMM_FP16_DEFERRED">;
-def VSrc_f32_Deferred : SrcRegOrImmDeferred9<VS_32, "OPERAND_REG_IMM_FP32_DEFERRED">;
-
-// True 16 Operands
-def VSrcT_f16_Lo128_Deferred : SrcRegOrImmDeferred9<VS_16_Lo128,
- "OPERAND_REG_IMM_FP16_DEFERRED"> {
- let DecoderMethodName = "decodeOperand_VSrcT16_Lo128_Deferred";
- let EncoderMethod = "getMachineOpValueT16Lo128";
-}
-
-def VSrcFake16_bf16_Lo128_Deferred
- : SrcRegOrImmDeferred9<VS_32_Lo128, "OPERAND_REG_IMM_BF16_DEFERRED">;
-def VSrcFake16_f16_Lo128_Deferred
- : SrcRegOrImmDeferred9<VS_32_Lo128, "OPERAND_REG_IMM_FP16_DEFERRED">;
-
//===----------------------------------------------------------------------===//
// VRegSrc_* Operands with a VGPR
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 3d3f1ba3f5170..40b3dfb94ce2f 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -872,7 +872,7 @@ let SubtargetPredicate = HasSALUFloatInsts, mayRais...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/139123
More information about the llvm-commits mailing list