[llvm] 6565b58 - [X86][llvm-mc] Make the suffix matcher more accurate.
via llvm-commits
llvm-commits at lists.llvm.org
Tue May 26 23:47:43 PDT 2020
Author: Wang, Pengfei
Date: 2020-05-27T14:45:17+08:00
New Revision: 6565b5858444ba7dcf799467f5be63d2c2370715
URL: https://github.com/llvm/llvm-project/commit/6565b5858444ba7dcf799467f5be63d2c2370715
DIFF: https://github.com/llvm/llvm-project/commit/6565b5858444ba7dcf799467f5be63d2c2370715.diff
LOG: [X86][llvm-mc] Make the suffix matcher more accurate.
Summary:
Some instructions, like VPMULDQ, are NOT variants of VPMULD but new
instructions in their own right.
So we should make sure the suffix matcher only matches memory variants
whose operand size is the same as the size the suffix implies.
Currently we only check for SSE/AVX* instructions, because many legacy
instructions never declared alias mnemonics for their variants.
Differential Revision: https://reviews.llvm.org/D80608
Added:
Modified:
llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
llvm/lib/Target/X86/AsmParser/X86Operand.h
llvm/test/MC/X86/avx512-err.s
llvm/test/tools/llvm-mca/X86/BdVer2/dependent-pmuld-paddd.s
llvm/test/tools/llvm-mca/X86/BtVer2/dependent-pmuld-paddd.s
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 6b06656410eb..a842a91bbb06 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -3441,20 +3441,47 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
// Otherwise, we assume that this may be an integer instruction, which comes
// in 8/16/32/64-bit forms using the b,w,l,q suffixes respectively.
const char *Suffixes = Base[0] != 'f' ? "bwlq" : "slt\0";
+ // MemSize corresponding to Suffixes. { 8, 16, 32, 64 } { 32, 64, 80, 0 }
+ const char *MemSize = Base[0] != 'f' ? "\x08\x10\x20\x40" : "\x20\x40\x50\0";
// Check for the various suffix matches.
uint64_t ErrorInfoIgnore;
FeatureBitset ErrorInfoMissingFeatures; // Init suppresses compiler warnings.
unsigned Match[4];
+ // Some instruction like VPMULDQ is NOT the variant of VPMULD but a new one.
+ // So we should make sure the suffix matcher only works for memory variant
+ // that has the same size with the suffix.
+ // FIXME: This flag is a workaround for legacy instructions that didn't
+ // declare non suffix variant assembly.
+ bool HasVectorReg = false;
+ X86Operand *MemOp = nullptr;
+ for (const auto &Op : Operands) {
+ X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
+ if (X86Op->isVectorReg())
+ HasVectorReg = true;
+ else if (X86Op->isMem()) {
+ MemOp = X86Op;
+ assert(MemOp->Mem.Size == 0 && "Memory size always 0 under ATT syntax");
+ // Have we found an unqualified memory operand,
+ // break. IA allows only one memory operand.
+ break;
+ }
+ }
+
for (unsigned I = 0, E = array_lengthof(Match); I != E; ++I) {
Tmp.back() = Suffixes[I];
- Match[I] = MatchInstruction(Operands, Inst, ErrorInfoIgnore,
- MissingFeatures, MatchingInlineAsm,
- isParsingIntelSyntax());
- // If this returned as a missing feature failure, remember that.
- if (Match[I] == Match_MissingFeature)
- ErrorInfoMissingFeatures = MissingFeatures;
+ if (MemOp)
+ MemOp->Mem.Size = MemSize[I];
+ Match[I] = Match_MnemonicFail;
+ if (MemOp || !HasVectorReg) {
+ Match[I] =
+ MatchInstruction(Operands, Inst, ErrorInfoIgnore, MissingFeatures,
+ MatchingInlineAsm, isParsingIntelSyntax());
+ // If this returned as a missing feature failure, remember that.
+ if (Match[I] == Match_MissingFeature)
+ ErrorInfoMissingFeatures = MissingFeatures;
+ }
}
// Restore the old token.
diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h
index 3da8009762f3..fb5f3355532e 100644
--- a/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -456,6 +456,14 @@ struct X86Operand final : public MCParsedAsmOperand {
X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
}
+ bool isVectorReg() const {
+ return Kind == Register &&
+ (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
+ X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
+ X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
+ X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
+ }
+
bool isVK1Pair() const {
return Kind == Register &&
X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
diff --git a/llvm/test/MC/X86/avx512-err.s b/llvm/test/MC/X86/avx512-err.s
index 9d8183d8872b..0d353a6c5498 100644
--- a/llvm/test/MC/X86/avx512-err.s
+++ b/llvm/test/MC/X86/avx512-err.s
@@ -14,3 +14,9 @@ cvtsd2sil {rn-sae}, %xmm1, %eax
// ERR: Expected an identifier after {
cvtsd2sil {{sae}, %xmm1, %eax
+
+// ERR: invalid instruction mnemonic 'vpmuld'
+vpmuld %xmm1, %xmm2, %xmm3
+
+// ERR: invalid instruction mnemonic 'maskmov'
+maskmov %mm1, %mm2
diff --git a/llvm/test/tools/llvm-mca/X86/BdVer2/dependent-pmuld-paddd.s b/llvm/test/tools/llvm-mca/X86/BdVer2/dependent-pmuld-paddd.s
index bf49f18c3a81..efa99fd32dd0 100644
--- a/llvm/test/tools/llvm-mca/X86/BdVer2/dependent-pmuld-paddd.s
+++ b/llvm/test/tools/llvm-mca/X86/BdVer2/dependent-pmuld-paddd.s
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=bdver2 -iterations=500 -timeline < %s | FileCheck %s
-vpmuld %xmm0, %xmm0, %xmm1
+vpmuldq %xmm0, %xmm0, %xmm1
vpaddd %xmm1, %xmm1, %xmm0
vpaddd %xmm0, %xmm0, %xmm3
diff --git a/llvm/test/tools/llvm-mca/X86/BtVer2/dependent-pmuld-paddd.s b/llvm/test/tools/llvm-mca/X86/BtVer2/dependent-pmuld-paddd.s
index 586aa73b15ab..2804ef697709 100644
--- a/llvm/test/tools/llvm-mca/X86/BtVer2/dependent-pmuld-paddd.s
+++ b/llvm/test/tools/llvm-mca/X86/BtVer2/dependent-pmuld-paddd.s
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=btver2 -iterations=500 -timeline < %s | FileCheck %s
-vpmuld %xmm0, %xmm0, %xmm1
+vpmuldq %xmm0, %xmm0, %xmm1
vpaddd %xmm1, %xmm1, %xmm0
vpaddd %xmm0, %xmm0, %xmm3
More information about the llvm-commits
mailing list