[llvm] r261839 - Revert r261742, "[AMDGPU] Assembler: Simplify handling of optional operands"
Author: chapuni
Date: Thu Feb 25 02:35:27 2016
New Revision: 261839
URL: http://llvm.org/viewvc/llvm-project?rev=261839&view=rev
Log:
Revert r261742, "[AMDGPU] Assembler: Simplify handling of optional operands"
It introduced undefined behavior.
Modified:
llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
Modified: llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp?rev=261839&r1=261838&r2=261839&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp Thu Feb 25 02:35:27 2016
@@ -486,7 +486,6 @@ public:
OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
void cvtFlat(MCInst &Inst, const OperandVector &Operands);
- void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseOffset(OperandVector &Operands);
@@ -673,8 +672,31 @@ bool AMDGPUAsmParser::MatchAndEmitInstru
SMLoc ErrorLoc = IDLoc;
if (ErrorInfo != ~0ULL) {
if (ErrorInfo >= Operands.size()) {
+ if (isForcedVOP3()) {
+ // If 64-bit encoding has been forced we can end up with no
+ // clamp or omod operands if none of the registers have modifiers,
+ // so we need to add these to the operand list.
+ AMDGPUOperand &LastOp =
+ ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
+ if (LastOp.isRegKind() ||
+ (LastOp.isImm() &&
+ LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
+ SMLoc S = Parser.getTok().getLoc();
+ Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+ AMDGPUOperand::ImmTyClamp));
+ Operands.push_back(AMDGPUOperand::CreateImm(0, S,
+ AMDGPUOperand::ImmTyOMod));
+ bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
+ Out, ErrorInfo,
+ MatchingInlineAsm);
+ if (!Res)
+ return Res;
+ }
+
+ }
return Error(IDLoc, "too few operands for instruction");
}
+
ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
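The hunk above restores the forced-VOP3 fallback: when the user writes the _e64 form and none of the registers carry modifiers, no clamp/omod operands get parsed, so the matcher appends zero-valued defaults and retries. A minimal standalone sketch of that retry pattern, using toy types rather than the real LLVM API:

  #include <cstdio>
  #include <vector>

  // Toy "matcher" that wants exactly four operands: dst, src, clamp, omod.
  static bool matchOnce(const std::vector<int> &Ops) { return Ops.size() == 4; }

  // If the first match fails for lack of trailing optional operands,
  // append value-0 defaults for clamp and omod and match once more.
  static bool matchWithDefaults(std::vector<int> Ops) {
    if (matchOnce(Ops))
      return true;
    Ops.push_back(0); // default clamp
    Ops.push_back(0); // default omod
    return matchOnce(Ops);
  }

  int main() {
    std::printf("%d\n", matchWithDefaults({1, 2})); // 1: retry succeeded
  }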
@@ -1239,6 +1261,13 @@ bool AMDGPUAsmParser::ParseInstruction(P
}
}
+ // Once we reach end of statement, continue parsing so we can add default
+ // values for optional arguments.
+ AMDGPUAsmParser::OperandMatchResultTy Res;
+ while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
+ if (Res != MatchOperand_Success)
+ return Error(getLexer().getLoc(), "failed parsing operand.");
+ }
return false;
}
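The restored loop above keeps calling parseOperand past the written operand list so that optional operands are materialized with default values. The tri-state result drives it: Success consumes an operand, NoMatch ends the loop cleanly, ParseFail is a hard error. A self-contained sketch of that control flow (toy token stream, hypothetical names):

  #include <cstdio>
  #include <vector>

  enum MatchResult { Success, NoMatch, ParseFail };

  // Toy operand parser: consumes tokens until the stream runs dry.
  static MatchResult parseOperand(std::vector<int> &Toks,
                                  std::vector<int> &Ops) {
    if (Toks.empty())
      return NoMatch;   // clean end of statement
    if (Toks.back() < 0)
      return ParseFail; // malformed operand
    Ops.push_back(Toks.back());
    Toks.pop_back();
    return Success;
  }

  int main() {
    std::vector<int> Toks = {3, 2, 1}, Ops;
    MatchResult Res;
    while ((Res = parseOperand(Toks, Ops)) != NoMatch) {
      if (Res != Success) {
        std::fprintf(stderr, "failed parsing operand.\n");
        return 1;
      }
    }
    std::printf("parsed %zu operands\n", Ops.size());
  }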
@@ -1327,18 +1356,6 @@ AMDGPUAsmParser::parseNamedBit(const cha
return MatchOperand_Success;
}
-typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
-
-void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
- auto i = OptionalIdx.find(ImmT);
- if (i != OptionalIdx.end()) {
- unsigned Idx = i->second;
- ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
- } else {
- Inst.addOperand(MCOperand::createImm(0));
- }
-}
-
static bool operandsHasOptionalOp(const OperandVector &Operands,
const OptionalOperand &OOp) {
for (unsigned i = 0; i < Operands.size(); i++) {
@@ -1375,15 +1392,11 @@ AMDGPUAsmParser::parseOptionalOps(const
if (Res != MatchOperand_Success)
return Res;
- bool DefaultValue = (Value == Op.Default);
-
if (Op.ConvertResult && !Op.ConvertResult(Value)) {
return MatchOperand_ParseFail;
}
- if (!DefaultValue) {
- Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
- }
+ Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
return MatchOperand_Success;
}
return MatchOperand_NoMatch;
@@ -1437,7 +1450,7 @@ bool AMDGPUOperand::isDSOffset01() const
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
+ std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1452,10 +1465,13 @@ void AMDGPUAsmParser::cvtDSOffset01(MCIn
OptionalIdx[Op.getImmTy()] = i;
}
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
-
+ unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
+ unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
+ unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
+
+ ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
+ ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
+ ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
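The direct OptionalIdx[...] lookups restored here are only safe because parseOptionalOps (see the hunk above) now pushes every recognized optional operand, defaults included. std::map::operator[] default-inserts a missing key, so a never-parsed operand would silently map to index 0, the mnemonic token slot; that is exactly the hazard the removed find()-based addOptionalImmOperand helper made observable. A standalone illustration (toy enum, not LLVM code):

  #include <cassert>
  #include <map>

  enum ImmTy { ImmTyDSOffset0, ImmTyDSOffset1, ImmTyGDS };

  int main() {
    std::map<ImmTy, unsigned> OptionalIdx;
    OptionalIdx[ImmTyDSOffset0] = 1; // operand parsed at Operands[1]

    // find() makes absence observable...
    assert(OptionalIdx.find(ImmTyGDS) == OptionalIdx.end());

    // ...whereas operator[] silently inserts a zero-valued entry.
    unsigned GDSIdx = OptionalIdx[ImmTyGDS];
    assert(GDSIdx == 0 && OptionalIdx.size() == 2);
    (void)GDSIdx;
  }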
@@ -1482,11 +1498,12 @@ void AMDGPUAsmParser::cvtDS(MCInst &Inst
OptionalIdx[Op.getImmTy()] = i;
}
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
+ unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
+ ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
if (!GDSOnly) {
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
+ unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
+ ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
}
Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
@@ -1625,7 +1642,7 @@ AMDGPUAsmParser::parseFlatAtomicOptional
void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
+ std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1636,39 +1653,27 @@ void AMDGPUAsmParser::cvtFlat(MCInst &In
continue;
}
- OptionalIdx[Op.getImmTy()] = i;
- }
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
-}
-
+ // Handle 'glc' token which is sometimes hard-coded into the
+ // asm string. There are no MCInst operands for these.
+ if (Op.isToken())
+ continue;
-void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
- const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
+ // Handle optional arguments
+ OptionalIdx[Op.getImmTy()] = i;
- bool token = false;
- for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+ }
- // Add the register arguments
- if (Op.isReg()) {
- Op.addRegOperands(Inst, 1);
- continue;
- }
+ // flat atomic instructions don't have a glc argument.
+ if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
+ unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
+ ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
+ }
- // Handle 'glc' token for flat atomics.
- if (Op.isToken()) {
- token = true;
- continue;
- }
+ unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
+ unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
- // Handle optional arguments
- OptionalIdx[Op.getImmTy()] = token ? i - 1 : i;
- }
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+ ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
+ ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}
//===----------------------------------------------------------------------===//
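With cvtFlatAtomic gone, cvtFlat above handles both forms: for atomics the "glc" spelling is a hard-coded token with no MCInst operand behind it, so the converter skips tokens and emits the glc immediate only when the map actually recorded one. A compile-ready sketch of that count()-guarded emission (hypothetical toy types):

  #include <map>
  #include <vector>

  enum ImmTy { ImmTyGLC, ImmTySLC, ImmTyTFE };

  // Toy converter: Operands holds parsed immediates, Inst receives them.
  static void cvtFlatToy(std::vector<int> &Inst,
                         std::map<ImmTy, unsigned> &OptionalIdx,
                         const std::vector<int> &Operands) {
    // Atomic forms spell "glc" as a literal token, so no ImmTyGLC entry
    // was recorded for them; emit the immediate only when present.
    if (OptionalIdx.count(ImmTyGLC))
      Inst.push_back(Operands[OptionalIdx[ImmTyGLC]]);
    Inst.push_back(Operands[OptionalIdx[ImmTySLC]]);
    Inst.push_back(Operands[OptionalIdx[ImmTyTFE]]);
  }

  int main() {
    std::map<ImmTy, unsigned> Idx = {{ImmTySLC, 1}, {ImmTyTFE, 2}};
    std::vector<int> Inst, Ops = {0, 1, 0};
    cvtFlatToy(Inst, Idx, Ops); // glc skipped: atomic-style operand set
    return Inst.size() == 2 ? 0 : 1;
  }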
@@ -1713,7 +1718,7 @@ bool AMDGPUOperand::isMubufOffset() cons
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
+ std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1741,10 +1746,17 @@ void AMDGPUAsmParser::cvtMubuf(MCInst &I
OptionalIdx[Op.getImmTy()] = i;
}
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+ assert(OptionalIdx.size() == 4);
+
+ unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
+ unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
+ unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
+ unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
+
+ ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
+ ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
+ ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
+ ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}
//===----------------------------------------------------------------------===//
@@ -1878,8 +1890,7 @@ void AMDGPUAsmParser::cvtId(MCInst &Inst
}
void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
- uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
- if (TSFlags & SIInstrFlags::VOP3) {
+ if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
cvtVOP3(Inst, Operands);
} else {
cvtId(Inst, Operands);
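This last C++ hunk swaps the dispatch policy for the converter: instead of consulting the matched opcode's TSFlags, it decides from what was actually parsed, so any operand carrying modifiers, or an explicitly forced _e64 encoding, selects the VOP3 path. A toy version of that decision (hypothetical names, not the LLVM types):

  #include <cstdio>

  // Stand-ins for parser state.
  struct ParseState {
    bool AnyOperandHasModifiers;
    bool ForcedVOP3; // user wrote the _e64 mnemonic suffix
  };

  static const char *pickConverter(const ParseState &S) {
    return (S.AnyOperandHasModifiers || S.ForcedVOP3) ? "cvtVOP3" : "cvtId";
  }

  int main() {
    std::printf("%s\n", pickConverter({false, true})); // cvtVOP3
  }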
Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td?rev=261839&r1=261838&r2=261839&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td Thu Feb 25 02:35:27 2016
@@ -435,7 +435,6 @@ def MubufOffsetMatchClass : AsmOperandCl
let Name = "MubufOffset";
let ParserMethod = "parseMubufOptionalOps";
let RenderMethod = "addImmOperands";
- let IsOptional = 1;
}
class DSOffsetBaseMatchClass <string parser> : AsmOperandClass {
@@ -443,7 +442,6 @@ class DSOffsetBaseMatchClass <string par
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
let PredicateMethod = "isDSOffset";
- let IsOptional = 1;
}
def DSOffsetMatchClass : DSOffsetBaseMatchClass <"parseDSOptionalOps">;
@@ -454,7 +452,6 @@ def DSOffset01MatchClass : AsmOperandCla
let ParserMethod = "parseDSOff01OptionalOps";
let RenderMethod = "addImmOperands";
let PredicateMethod = "isDSOffset01";
- let IsOptional = 1;
}
class GDSBaseMatchClass <string parser> : AsmOperandClass {
@@ -462,7 +459,6 @@ class GDSBaseMatchClass <string parser>
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
- let IsOptional = 1;
}
def GDSMatchClass : GDSBaseMatchClass <"parseDSOptionalOps">;
@@ -473,7 +469,6 @@ class GLCBaseMatchClass <string parser>
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
- let IsOptional = 1;
}
def GLCMubufMatchClass : GLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -484,7 +479,6 @@ class SLCBaseMatchClass <string parser>
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
- let IsOptional = 1;
}
def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -496,7 +490,6 @@ class TFEBaseMatchClass <string parser>
let PredicateMethod = "isImm";
let ParserMethod = parser;
let RenderMethod = "addImmOperands";
- let IsOptional = 1;
}
def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">;
@@ -530,21 +523,13 @@ def SMRDLiteralOffsetMatchClass : SMRDOf
"isSMRDLiteralOffset"
>;
-class OptionalImmAsmOperand <string OpName> : AsmOperandClass {
- let Name = "Imm"#OpName;
- let PredicateMethod = "isImm";
- let IsOptional = 1;
-}
-
let OperandType = "OPERAND_IMMEDIATE" in {
def offen : Operand<i1> {
let PrintMethod = "printOffen";
- let ParserMatchClass = OptionalImmAsmOperand<"offen">;
}
def idxen : Operand<i1> {
let PrintMethod = "printIdxen";
- let ParserMatchClass = OptionalImmAsmOperand<"idxen">;
}
def addr64 : Operand<i1> {
let PrintMethod = "printAddr64";
@@ -2886,7 +2871,7 @@ multiclass FLAT_ATOMIC <flat op, string
dag outs_noret = (outs),
string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> {
- let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0, AsmMatchConverter = "cvtFlatAtomic" in {
+ let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in {
def "" : FLAT_Pseudo <NAME, outs_noret,
(ins VReg_64:$addr, data_rc:$data,
slc_flat_atomic:$slc, tfe_flat_atomic:$tfe), []>,
@@ -2903,7 +2888,7 @@ multiclass FLAT_ATOMIC <flat op, string
asm_noret>;
}
- let glc = 1, hasPostISelHook = 1, AsmMatchConverter = "cvtFlatAtomic" in {
+ let glc = 1, hasPostISelHook = 1 in {
defm _RTN : FLAT_AtomicRet_m <op, (outs vdst_rc:$vdst),
(ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
tfe_flat_atomic:$tfe),
Modified: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstructions.td?rev=261839&r1=261838&r2=261839&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td Thu Feb 25 02:35:27 2016
@@ -1433,7 +1433,7 @@ multiclass V_CNDMASK <vop2 op, string na
defm _e64 : VOP3_m <
op, VOP_CNDMASK.Outs, VOP_CNDMASK.Ins64,
- name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3, 0>;
+ name#!cast<string>(VOP_CNDMASK.Asm64), [], name, 3>;
}
defm V_CNDMASK_B32 : V_CNDMASK<vop2<0x0>, "v_cndmask_b32">;