[llvm] 50ead2e - [X86][AsmParser] Avoid duplicated code in MatchAndEmit(ATT/Intel)Instruction, NFC
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 13 18:07:14 PDT 2024
Author: Shengchen Kan
Date: 2024-06-14T09:06:57+08:00
New Revision: 50ead2ee93bf1b59f35d7afda553a026b87855bb
URL: https://github.com/llvm/llvm-project/commit/50ead2ee93bf1b59f35d7afda553a026b87855bb
DIFF: https://github.com/llvm/llvm-project/commit/50ead2ee93bf1b59f35d7afda553a026b87855bb.diff
LOG: [X86][AsmParser] Avoid duplicated code in MatchAndEmit(ATT/Intel)Instruction, NFC
And VEXEncoding_* are renamed to OpcodePrefix_*.
This is in preparation for the upcoming pseudo rex/rex2 prefix support.
Added:
Modified:
llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 6623106109316..ffd66aa800584 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -88,15 +88,15 @@ class X86AsmParser : public MCTargetAsmParser {
bool Code16GCC;
unsigned ForcedDataPrefix = 0;
- enum VEXEncoding {
- VEXEncoding_Default,
- VEXEncoding_VEX,
- VEXEncoding_VEX2,
- VEXEncoding_VEX3,
- VEXEncoding_EVEX,
+ enum OpcodePrefix {
+ OpcodePrefix_Default,
+ OpcodePrefix_VEX,
+ OpcodePrefix_VEX2,
+ OpcodePrefix_VEX3,
+ OpcodePrefix_EVEX,
};
- VEXEncoding ForcedVEXEncoding = VEXEncoding_Default;
+ OpcodePrefix ForcedOpcodePrefix = OpcodePrefix_Default;
enum DispEncoding {
DispEncoding_Default,
@@ -1197,12 +1197,11 @@ class X86AsmParser : public MCTargetAsmParser {
bool ErrorMissingFeature(SMLoc IDLoc, const FeatureBitset &MissingFeatures,
bool MatchingInlineAsm);
- bool MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
+ bool matchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
OperandVector &Operands, MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm);
+ uint64_t &ErrorInfo, bool MatchingInlineAsm);
- bool MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
+ bool matchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode, MCInst &Inst,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm);
@@ -3186,7 +3185,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
InstInfo = &Info;
// Reset the forced VEX encoding.
- ForcedVEXEncoding = VEXEncoding_Default;
+ ForcedOpcodePrefix = OpcodePrefix_Default;
ForcedDispEncoding = DispEncoding_Default;
UseApxExtendedReg = false;
ForcedNoFlag = false;
@@ -3203,13 +3202,13 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Parser.Lex(); // Eat curly.
if (Prefix == "vex")
- ForcedVEXEncoding = VEXEncoding_VEX;
+ ForcedOpcodePrefix = OpcodePrefix_VEX;
else if (Prefix == "vex2")
- ForcedVEXEncoding = VEXEncoding_VEX2;
+ ForcedOpcodePrefix = OpcodePrefix_VEX2;
else if (Prefix == "vex3")
- ForcedVEXEncoding = VEXEncoding_VEX3;
+ ForcedOpcodePrefix = OpcodePrefix_VEX3;
else if (Prefix == "evex")
- ForcedVEXEncoding = VEXEncoding_EVEX;
+ ForcedOpcodePrefix = OpcodePrefix_EVEX;
else if (Prefix == "disp8")
ForcedDispEncoding = DispEncoding_Disp8;
else if (Prefix == "disp32")
@@ -3235,15 +3234,15 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// Parse MASM style pseudo prefixes.
if (isParsingMSInlineAsm()) {
if (Name.equals_insensitive("vex"))
- ForcedVEXEncoding = VEXEncoding_VEX;
+ ForcedOpcodePrefix = OpcodePrefix_VEX;
else if (Name.equals_insensitive("vex2"))
- ForcedVEXEncoding = VEXEncoding_VEX2;
+ ForcedOpcodePrefix = OpcodePrefix_VEX2;
else if (Name.equals_insensitive("vex3"))
- ForcedVEXEncoding = VEXEncoding_VEX3;
+ ForcedOpcodePrefix = OpcodePrefix_VEX3;
else if (Name.equals_insensitive("evex"))
- ForcedVEXEncoding = VEXEncoding_EVEX;
+ ForcedOpcodePrefix = OpcodePrefix_EVEX;
- if (ForcedVEXEncoding != VEXEncoding_Default) {
+ if (ForcedOpcodePrefix != OpcodePrefix_Default) {
if (getLexer().isNot(AsmToken::Identifier))
return Error(Parser.getTok().getLoc(), "Expected identifier");
// FIXME: The mnemonic won't match correctly if its not in lower case.
@@ -3741,7 +3740,7 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
}
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
- if (ForcedVEXEncoding != VEXEncoding_VEX3 &&
+ if (ForcedOpcodePrefix != OpcodePrefix_VEX3 &&
X86::optimizeInstFromVEX3ToVEX2(Inst, MII.get(Inst.getOpcode())))
return true;
@@ -4002,15 +4001,55 @@ void X86AsmParser::emitInstruction(MCInst &Inst, OperandVector &Operands,
applyLVILoadHardeningMitigation(Inst, Out);
}
+static unsigned getPrefixes(OperandVector &Operands) {
+ unsigned Result = 0;
+ X86Operand &Prefix = static_cast<X86Operand &>(*Operands.back());
+ if (Prefix.isPrefix()) {
+ Result = Prefix.getPrefix();
+ Operands.pop_back();
+ }
+ return Result;
+}
+
bool X86AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out, uint64_t &ErrorInfo,
bool MatchingInlineAsm) {
- if (isParsingIntelSyntax())
- return MatchAndEmitIntelInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
- MatchingInlineAsm);
- return MatchAndEmitATTInstruction(IDLoc, Opcode, Operands, Out, ErrorInfo,
- MatchingInlineAsm);
+ assert(!Operands.empty() && "Unexpect empty operand list!");
+ assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
+
+ // First, handle aliases that expand to multiple instructions.
+ MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
+ Out, MatchingInlineAsm);
+ unsigned Prefixes = getPrefixes(Operands);
+
+ MCInst Inst;
+
+ // If VEX/EVEX encoding is forced, we need to pass the USE_* flag to the
+ // encoder and printer.
+ if (ForcedOpcodePrefix == OpcodePrefix_VEX)
+ Prefixes |= X86::IP_USE_VEX;
+ else if (ForcedOpcodePrefix == OpcodePrefix_VEX2)
+ Prefixes |= X86::IP_USE_VEX2;
+ else if (ForcedOpcodePrefix == OpcodePrefix_VEX3)
+ Prefixes |= X86::IP_USE_VEX3;
+ else if (ForcedOpcodePrefix == OpcodePrefix_EVEX)
+ Prefixes |= X86::IP_USE_EVEX;
+
+ // Set encoded flags for {disp8} and {disp32}.
+ if (ForcedDispEncoding == DispEncoding_Disp8)
+ Prefixes |= X86::IP_USE_DISP8;
+ else if (ForcedDispEncoding == DispEncoding_Disp32)
+ Prefixes |= X86::IP_USE_DISP32;
+
+ if (Prefixes)
+ Inst.setFlags(Prefixes);
+
+ return isParsingIntelSyntax()
+ ? matchAndEmitIntelInstruction(IDLoc, Opcode, Inst, Operands, Out,
+ ErrorInfo, MatchingInlineAsm)
+ : matchAndEmitATTInstruction(IDLoc, Opcode, Inst, Operands, Out,
+ ErrorInfo, MatchingInlineAsm);
}
void X86AsmParser::MatchFPUWaitAlias(SMLoc IDLoc, X86Operand &Op,
@@ -4053,16 +4092,6 @@ bool X86AsmParser::ErrorMissingFeature(SMLoc IDLoc,
return Error(IDLoc, OS.str(), SMRange(), MatchingInlineAsm);
}
-static unsigned getPrefixes(OperandVector &Operands) {
- unsigned Result = 0;
- X86Operand &Prefix = static_cast<X86Operand &>(*Operands.back());
- if (Prefix.isPrefix()) {
- Result = Prefix.getPrefix();
- Operands.pop_back();
- }
- return Result;
-}
-
unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
unsigned Opc = Inst.getOpcode();
const MCInstrDesc &MCID = MII.get(Opc);
@@ -4072,63 +4101,31 @@ unsigned X86AsmParser::checkTargetMatchPredicate(MCInst &Inst) {
if (ForcedNoFlag == !(MCID.TSFlags & X86II::EVEX_NF) && !X86::isCFCMOVCC(Opc))
return Match_Unsupported;
- if (ForcedVEXEncoding == VEXEncoding_EVEX &&
+ if (ForcedOpcodePrefix == OpcodePrefix_EVEX &&
(MCID.TSFlags & X86II::EncodingMask) != X86II::EVEX)
return Match_Unsupported;
- if ((ForcedVEXEncoding == VEXEncoding_VEX ||
- ForcedVEXEncoding == VEXEncoding_VEX2 ||
- ForcedVEXEncoding == VEXEncoding_VEX3) &&
+ if ((ForcedOpcodePrefix == OpcodePrefix_VEX ||
+ ForcedOpcodePrefix == OpcodePrefix_VEX2 ||
+ ForcedOpcodePrefix == OpcodePrefix_VEX3) &&
(MCID.TSFlags & X86II::EncodingMask) != X86II::VEX)
return Match_Unsupported;
if ((MCID.TSFlags & X86II::ExplicitOpPrefixMask) ==
X86II::ExplicitVEXPrefix &&
- (ForcedVEXEncoding != VEXEncoding_VEX &&
- ForcedVEXEncoding != VEXEncoding_VEX2 &&
- ForcedVEXEncoding != VEXEncoding_VEX3))
+ (ForcedOpcodePrefix != OpcodePrefix_VEX &&
+ ForcedOpcodePrefix != OpcodePrefix_VEX2 &&
+ ForcedOpcodePrefix != OpcodePrefix_VEX3))
return Match_Unsupported;
return Match_Success;
}
-bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands,
- MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm) {
- assert(!Operands.empty() && "Unexpect empty operand list!");
- assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
- SMRange EmptyRange = std::nullopt;
-
- // First, handle aliases that expand to multiple instructions.
- MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands,
- Out, MatchingInlineAsm);
+bool X86AsmParser::matchAndEmitATTInstruction(
+ SMLoc IDLoc, unsigned &Opcode, MCInst &Inst, OperandVector &Operands,
+ MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) {
X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
- unsigned Prefixes = getPrefixes(Operands);
-
- MCInst Inst;
-
- // If VEX/EVEX encoding is forced, we need to pass the USE_* flag to the
- // encoder and printer.
- if (ForcedVEXEncoding == VEXEncoding_VEX)
- Prefixes |= X86::IP_USE_VEX;
- else if (ForcedVEXEncoding == VEXEncoding_VEX2)
- Prefixes |= X86::IP_USE_VEX2;
- else if (ForcedVEXEncoding == VEXEncoding_VEX3)
- Prefixes |= X86::IP_USE_VEX3;
- else if (ForcedVEXEncoding == VEXEncoding_EVEX)
- Prefixes |= X86::IP_USE_EVEX;
-
- // Set encoded flags for {disp8} and {disp32}.
- if (ForcedDispEncoding == DispEncoding_Disp8)
- Prefixes |= X86::IP_USE_DISP8;
- else if (ForcedDispEncoding == DispEncoding_Disp32)
- Prefixes |= X86::IP_USE_DISP32;
-
- if (Prefixes)
- Inst.setFlags(Prefixes);
-
+ SMRange EmptyRange = std::nullopt;
// In 16-bit mode, if data32 is specified, temporarily switch to 32-bit mode
// when matching the instruction.
if (ForcedDataPrefix == X86::Is32Bit)
@@ -4350,44 +4347,11 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
return true;
}
-bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
- OperandVector &Operands,
- MCStreamer &Out,
- uint64_t &ErrorInfo,
- bool MatchingInlineAsm) {
- assert(!Operands.empty() && "Unexpect empty operand list!");
- assert((*Operands[0]).isToken() && "Leading operand should always be a mnemonic!");
- StringRef Mnemonic = (static_cast<X86Operand &>(*Operands[0])).getToken();
- SMRange EmptyRange = std::nullopt;
- StringRef Base = (static_cast<X86Operand &>(*Operands[0])).getToken();
- unsigned Prefixes = getPrefixes(Operands);
-
- // First, handle aliases that expand to multiple instructions.
- MatchFPUWaitAlias(IDLoc, static_cast<X86Operand &>(*Operands[0]), Operands, Out, MatchingInlineAsm);
+bool X86AsmParser::matchAndEmitIntelInstruction(
+ SMLoc IDLoc, unsigned &Opcode, MCInst &Inst, OperandVector &Operands,
+ MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) {
X86Operand &Op = static_cast<X86Operand &>(*Operands[0]);
-
- MCInst Inst;
-
- // If VEX/EVEX encoding is forced, we need to pass the USE_* flag to the
- // encoder and printer.
- if (ForcedVEXEncoding == VEXEncoding_VEX)
- Prefixes |= X86::IP_USE_VEX;
- else if (ForcedVEXEncoding == VEXEncoding_VEX2)
- Prefixes |= X86::IP_USE_VEX2;
- else if (ForcedVEXEncoding == VEXEncoding_VEX3)
- Prefixes |= X86::IP_USE_VEX3;
- else if (ForcedVEXEncoding == VEXEncoding_EVEX)
- Prefixes |= X86::IP_USE_EVEX;
-
- // Set encoded flags for {disp8} and {disp32}.
- if (ForcedDispEncoding == DispEncoding_Disp8)
- Prefixes |= X86::IP_USE_DISP8;
- else if (ForcedDispEncoding == DispEncoding_Disp32)
- Prefixes |= X86::IP_USE_DISP32;
-
- if (Prefixes)
- Inst.setFlags(Prefixes);
-
+ SMRange EmptyRange = std::nullopt;
// Find one unsized memory operand, if present.
X86Operand *UnsizedMemOp = nullptr;
for (const auto &Op : Operands) {
@@ -4402,6 +4366,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
// Allow some instructions to have implicitly pointer-sized operands. This is
// compatible with gas.
+ StringRef Mnemonic = (static_cast<X86Operand &>(*Operands[0])).getToken();
if (UnsizedMemOp) {
static const char *const PtrSizedInstrs[] = {"call", "jmp", "push"};
for (const char *Instr : PtrSizedInstrs) {
@@ -4415,6 +4380,7 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVector<unsigned, 8> Match;
FeatureBitset ErrorInfoMissingFeatures;
FeatureBitset MissingFeatures;
+ StringRef Base = (static_cast<X86Operand &>(*Operands[0])).getToken();
// If unsized push has immediate operand we should default the default pointer
// size for the size.
More information about the llvm-commits
mailing list