[llvm] c9cdaff - [AMDGPU] Fix operand definitions for atomic scalar memory instructions. (#71799)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 10 06:00:50 PST 2023
Author: Ivan Kosarev
Date: 2023-11-10T14:00:46Z
New Revision: c9cdaffe49902af71c3610806b1d688385fa2257
URL: https://github.com/llvm/llvm-project/commit/c9cdaffe49902af71c3610806b1d688385fa2257
DIFF: https://github.com/llvm/llvm-project/commit/c9cdaffe49902af71c3610806b1d688385fa2257.diff
LOG: [AMDGPU] Fix operand definitions for atomic scalar memory instructions. (#71799)
CPol and CPol_GLC1 operand classes have identical predicates, which
means AsmParser cannot differentiate between the RTN and non-RTN
variants of the instructions. When the wrong instruction is selected,
a hack in cvtSMEMAtomic() corrects the opcode. Using the new
predicated-value operands makes this hack — and the whole conversion
function — unnecessary.
Other uses of CPol_GLC1 operands are to be addressed separately.
Resolves about half of the remaining ~1000 pairs of ambiguous
instructions.
Part of <https://github.com/llvm/llvm-project/issues/69256>.
Added:
Modified:
llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
llvm/lib/Target/AMDGPU/SIInstrInfo.td
llvm/lib/Target/AMDGPU/SMInstructions.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 23242ad84b0c425..d2d09a0b1fc5485 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -165,6 +165,20 @@ class ImmOperand<ValueType type, string name = NAME, bit optional = 0,
def s16imm : ImmOperand<i16, "S16Imm", 0, "printU16ImmOperand">;
def u16imm : ImmOperand<i16, "U16Imm", 0, "printU16ImmOperand">;
+class ValuePredicatedOperand<CustomOperand op, string valuePredicate,
+ bit optional = 0>
+ : CustomOperand<op.Type, optional> {
+ let ImmTy = op.ImmTy;
+ defvar OpPredicate = op.ParserMatchClass.PredicateMethod;
+ let PredicateMethod =
+ "getPredicate([](const AMDGPUOperand &Op) -> bool { "#
+ "return Op."#OpPredicate#"() && "#valuePredicate#"; })";
+ let ParserMethod = op.ParserMatchClass.ParserMethod;
+ let DefaultValue = op.DefaultValue;
+ let DefaultMethod = op.DefaultMethod;
+ let PrintMethod = op.PrintMethod;
+}
+
//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index fe2729f9790aa7c..be74c627d213756 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -909,6 +909,10 @@ class AMDGPUOperand : public MCParsedAsmOperand {
bool isWaitVDST() const;
bool isWaitEXP() const;
+ auto getPredicate(std::function<bool(const AMDGPUOperand &Op)> P) const {
+ return std::bind(P, *this);
+ }
+
StringRef getToken() const {
assert(isToken());
return StringRef(Tok.Data, Tok.Length);
@@ -1773,7 +1777,6 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
void cvtVINTERP(MCInst &Inst, const OperandVector &Operands);
- void cvtSMEMAtomic(MCInst &Inst, const OperandVector &Operands);
bool parseDimId(unsigned &Encoding);
ParseStatus parseDim(OperandVector &Operands);
@@ -7722,66 +7725,6 @@ void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0);
}
-//===----------------------------------------------------------------------===//
-// SMEM
-//===----------------------------------------------------------------------===//
-
-void AMDGPUAsmParser::cvtSMEMAtomic(MCInst &Inst, const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
- bool IsAtomicReturn = false;
-
- for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
- if (!Op.isCPol())
- continue;
- IsAtomicReturn = Op.getImm() & AMDGPU::CPol::GLC;
- break;
- }
-
- if (!IsAtomicReturn) {
- int NewOpc = AMDGPU::getAtomicNoRetOp(Inst.getOpcode());
- if (NewOpc != -1)
- Inst.setOpcode(NewOpc);
- }
-
- IsAtomicReturn = MII.get(Inst.getOpcode()).TSFlags &
- SIInstrFlags::IsAtomicRet;
-
- for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
- // Add the register arguments
- if (Op.isReg()) {
- Op.addRegOperands(Inst, 1);
- if (IsAtomicReturn && i == 1)
- Op.addRegOperands(Inst, 1);
- continue;
- }
-
- // Handle the case where soffset is an immediate
- if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
- Op.addImmOperands(Inst, 1);
- continue;
- }
-
- // Handle tokens like 'offen' which are sometimes hard-coded into the
- // asm string. There are no MCInst operands for these.
- if (Op.isToken()) {
- continue;
- }
- assert(Op.isImm());
-
- // Handle optional arguments
- OptionalIdx[Op.getImmTy()] = i;
- }
-
- if ((int)Inst.getNumOperands() <=
- AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset))
- addOptionalImmOperand(Inst, Operands, OptionalIdx,
- AMDGPUOperand::ImmTySMEMOffsetMod);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0);
-}
-
//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index b0493edfa335acf..02c769bf21ac3ea 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1078,6 +1078,8 @@ def highmod : NamedBitOperand<"high", "High">;
def CPol : CustomOperand<i32, 1>;
def CPol_0 : DefaultOperand<CPol, 0>;
def CPol_GLC1 : DefaultOperand<CPol, 1>;
+def CPol_GLC : ValuePredicatedOperand<CPol, "Op.getImm() & CPol::GLC">;
+def CPol_NonGLC : ValuePredicatedOperand<CPol, "!(Op.getImm() & CPol::GLC)", 1>;
def TFE : NamedBitOperand<"tfe">;
def UNorm : NamedBitOperand<"unorm">;
diff --git a/llvm/lib/Target/AMDGPU/SMInstructions.td b/llvm/lib/Target/AMDGPU/SMInstructions.td
index 7ca685a0cc5d5e1..6235965b6e165bc 100644
--- a/llvm/lib/Target/AMDGPU/SMInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SMInstructions.td
@@ -234,8 +234,6 @@ class SM_Atomic_Pseudo <string opName,
let IsAtomicNoRet = !not(isRet);
let IsAtomicRet = isRet;
-
- let AsmMatchConverter = "cvtSMEMAtomic";
}
class SM_Pseudo_Atomic<string opName,
@@ -245,7 +243,7 @@ class SM_Pseudo_Atomic<string opName,
bit isRet,
string opNameWithSuffix =
opName # offsets.Variant # !if(isRet, "_RTN", ""),
- Operand CPolTy = !if(isRet, CPol_GLC1, CPol)> :
+ Operand CPolTy = !if(isRet, CPol_GLC, CPol_NonGLC)> :
SM_Atomic_Pseudo<opName,
!if(isRet, (outs dataClass:$sdst), (outs)),
!con((ins dataClass:$sdata, baseClass:$sbase), offsets.Ins,
More information about the llvm-commits
mailing list