[llvm] c66c15a - [X86] Rename some variables for memory fold and format code, NFCI
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 28 03:08:05 PST 2023
Author: Shengchen Kan
Date: 2023-11-28T19:07:44+08:00
New Revision: c66c15a76dc7b021c29479a54aa1785928e9d1bf
URL: https://github.com/llvm/llvm-project/commit/c66c15a76dc7b021c29479a54aa1785928e9d1bf
DIFF: https://github.com/llvm/llvm-project/commit/c66c15a76dc7b021c29479a54aa1785928e9d1bf.diff
LOG: [X86] Rename some variables for memory fold and format code, NFCI
1. Rename the tables to simplify printing them
2. Use one abbreviation consistently within the file: Instr -> Inst
3. Run clang-format
4. Capitalize the first character of variable names
Added:
Modified:
llvm/lib/Target/X86/X86InstrFoldTables.cpp
llvm/test/TableGen/x86-fold-tables.inc
llvm/utils/TableGen/X86FoldTablesEmitter.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
index 5055f719ee16c88..10f817fcfc43df5 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -23,7 +23,7 @@ using namespace llvm;
// are currently emitted in X86GenInstrInfo.inc in alphabetical order. Which
// makes sorting these tables a simple matter of alphabetizing the table.
#include "X86GenFoldTables.inc"
-static const X86MemoryFoldTableEntry BroadcastFoldTable2[] = {
+static const X86MemoryFoldTableEntry BroadcastTable2[] = {
{ X86::VADDPDZ128rr, X86::VADDPDZ128rmb, TB_BCAST_SD },
{ X86::VADDPDZ256rr, X86::VADDPDZ256rmb, TB_BCAST_SD },
{ X86::VADDPDZrr, X86::VADDPDZrmb, TB_BCAST_SD },
@@ -200,7 +200,7 @@ static const X86MemoryFoldTableEntry BroadcastFoldTable2[] = {
{ X86::VXORPSZrr, X86::VXORPSZrmb, TB_BCAST_SS },
};
-static const X86MemoryFoldTableEntry BroadcastFoldTable3[] = {
+static const X86MemoryFoldTableEntry BroadcastTable3[] = {
{ X86::VFMADD132PDZ128r, X86::VFMADD132PDZ128mb, TB_BCAST_SD },
{ X86::VFMADD132PDZ256r, X86::VFMADD132PDZ256mb, TB_BCAST_SD },
{ X86::VFMADD132PDZr, X86::VFMADD132PDZmb, TB_BCAST_SD },
@@ -319,7 +319,7 @@ static const X86MemoryFoldTableEntry BroadcastFoldTable3[] = {
// Table to map instructions safe to broadcast using a different width from the
// element width.
-static const X86MemoryFoldTableEntry BroadcastSizeFoldTable2[] = {
+static const X86MemoryFoldTableEntry BroadcastSizeTable2[] = {
{ X86::VANDNPDZ128rr, X86::VANDNPSZ128rmb, TB_BCAST_SS },
{ X86::VANDNPDZ256rr, X86::VANDNPSZ256rmb, TB_BCAST_SS },
{ X86::VANDNPDZrr, X86::VANDNPSZrmb, TB_BCAST_SS },
@@ -370,7 +370,7 @@ static const X86MemoryFoldTableEntry BroadcastSizeFoldTable2[] = {
{ X86::VXORPSZrr, X86::VXORPDZrmb, TB_BCAST_SD },
};
-static const X86MemoryFoldTableEntry BroadcastSizeFoldTable3[] = {
+static const X86MemoryFoldTableEntry BroadcastSizeTable3[] = {
{ X86::VPTERNLOGDZ128rri, X86::VPTERNLOGQZ128rmbi, TB_BCAST_Q },
{ X86::VPTERNLOGDZ256rri, X86::VPTERNLOGQZ256rmbi, TB_BCAST_Q },
{ X86::VPTERNLOGDZrri, X86::VPTERNLOGQZrmbi, TB_BCAST_Q },
@@ -391,16 +391,16 @@ lookupFoldTableImpl(ArrayRef<X86MemoryFoldTableEntry> Table, unsigned RegOp) {
// Make sure the tables are sorted.
static std::atomic<bool> FoldTablesChecked(false);
if (!FoldTablesChecked.load(std::memory_order_relaxed)) {
- CHECK_SORTED_UNIQUE(MemoryFoldTable2Addr)
- CHECK_SORTED_UNIQUE(MemoryFoldTable0)
- CHECK_SORTED_UNIQUE(MemoryFoldTable1)
- CHECK_SORTED_UNIQUE(MemoryFoldTable2)
- CHECK_SORTED_UNIQUE(MemoryFoldTable3)
- CHECK_SORTED_UNIQUE(MemoryFoldTable4)
- CHECK_SORTED_UNIQUE(BroadcastFoldTable2)
- CHECK_SORTED_UNIQUE(BroadcastFoldTable3)
- CHECK_SORTED_UNIQUE(BroadcastSizeFoldTable2)
- CHECK_SORTED_UNIQUE(BroadcastSizeFoldTable3)
+ CHECK_SORTED_UNIQUE(Table2Addr)
+ CHECK_SORTED_UNIQUE(Table0)
+ CHECK_SORTED_UNIQUE(Table1)
+ CHECK_SORTED_UNIQUE(Table2)
+ CHECK_SORTED_UNIQUE(Table3)
+ CHECK_SORTED_UNIQUE(Table4)
+ CHECK_SORTED_UNIQUE(BroadcastTable2)
+ CHECK_SORTED_UNIQUE(BroadcastTable3)
+ CHECK_SORTED_UNIQUE(BroadcastSizeTable2)
+ CHECK_SORTED_UNIQUE(BroadcastSizeTable3)
FoldTablesChecked.store(true, std::memory_order_relaxed);
}
#endif
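The CHECK_SORTED_UNIQUE macro is defined elsewhere in
X86InstrFoldTables.cpp and is untouched by this diff. A minimal sketch of
the check it presumably performs (sorted by KeyOp, no duplicate keys),
with a stand-in entry type since the real definition is not shown here:

#include <algorithm>
#include <cassert>
#include <iterator>

// Stand-in for the real entry type; field roles inferred from usage in
// this diff (KeyOp/DstOp appear below, the third field is assumed).
struct X86MemoryFoldTableEntry {
  unsigned KeyOp;  // register-form opcode, the lookup key
  unsigned DstOp;  // memory-form opcode
  unsigned Flags;  // TB_* attribute bits
};

// Assert TABLE is sorted by key and has no duplicate keys; #TABLE
// stringizes the table name for the assertion message.
#define CHECK_SORTED_UNIQUE(TABLE)                                          \
  assert(std::is_sorted(std::begin(TABLE), std::end(TABLE),                 \
                        [](const X86MemoryFoldTableEntry &A,                \
                           const X86MemoryFoldTableEntry &B) {              \
                          return A.KeyOp < B.KeyOp;                         \
                        }) &&                                               \
         std::adjacent_find(std::begin(TABLE), std::end(TABLE),             \
                            [](const X86MemoryFoldTableEntry &A,            \
                               const X86MemoryFoldTableEntry &B) {          \
                              return A.KeyOp == B.KeyOp;                    \
                            }) == std::end(TABLE) &&                        \
         #TABLE " must be sorted and unique");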
@@ -414,22 +414,22 @@ lookupFoldTableImpl(ArrayRef<X86MemoryFoldTableEntry> Table, unsigned RegOp) {
const X86MemoryFoldTableEntry *
llvm::lookupTwoAddrFoldTable(unsigned RegOp) {
- return lookupFoldTableImpl(MemoryFoldTable2Addr, RegOp);
+ return lookupFoldTableImpl(Table2Addr, RegOp);
}
const X86MemoryFoldTableEntry *
llvm::lookupFoldTable(unsigned RegOp, unsigned OpNum) {
ArrayRef<X86MemoryFoldTableEntry> FoldTable;
if (OpNum == 0)
- FoldTable = ArrayRef(MemoryFoldTable0);
+ FoldTable = ArrayRef(Table0);
else if (OpNum == 1)
- FoldTable = ArrayRef(MemoryFoldTable1);
+ FoldTable = ArrayRef(Table1);
else if (OpNum == 2)
- FoldTable = ArrayRef(MemoryFoldTable2);
+ FoldTable = ArrayRef(Table2);
else if (OpNum == 3)
- FoldTable = ArrayRef(MemoryFoldTable3);
+ FoldTable = ArrayRef(Table3);
else if (OpNum == 4)
- FoldTable = ArrayRef(MemoryFoldTable4);
+ FoldTable = ArrayRef(Table4);
else
return nullptr;
@@ -445,36 +445,36 @@ struct X86MemUnfoldTable {
std::vector<X86MemoryFoldTableEntry> Table;
X86MemUnfoldTable() {
- for (const X86MemoryFoldTableEntry &Entry : MemoryFoldTable2Addr)
+ for (const X86MemoryFoldTableEntry &Entry : Table2Addr)
// Index 0, folded load and store, no alignment requirement.
addTableEntry(Entry, TB_INDEX_0 | TB_FOLDED_LOAD | TB_FOLDED_STORE);
- for (const X86MemoryFoldTableEntry &Entry : MemoryFoldTable0)
+ for (const X86MemoryFoldTableEntry &Entry : Table0)
// Index 0, mix of loads and stores.
addTableEntry(Entry, TB_INDEX_0);
- for (const X86MemoryFoldTableEntry &Entry : MemoryFoldTable1)
+ for (const X86MemoryFoldTableEntry &Entry : Table1)
// Index 1, folded load
addTableEntry(Entry, TB_INDEX_1 | TB_FOLDED_LOAD);
- for (const X86MemoryFoldTableEntry &Entry : MemoryFoldTable2)
+ for (const X86MemoryFoldTableEntry &Entry : Table2)
// Index 2, folded load
addTableEntry(Entry, TB_INDEX_2 | TB_FOLDED_LOAD);
- for (const X86MemoryFoldTableEntry &Entry : MemoryFoldTable3)
+ for (const X86MemoryFoldTableEntry &Entry : Table3)
// Index 3, folded load
addTableEntry(Entry, TB_INDEX_3 | TB_FOLDED_LOAD);
- for (const X86MemoryFoldTableEntry &Entry : MemoryFoldTable4)
+ for (const X86MemoryFoldTableEntry &Entry : Table4)
// Index 4, folded load
addTableEntry(Entry, TB_INDEX_4 | TB_FOLDED_LOAD);
// Broadcast tables.
- for (const X86MemoryFoldTableEntry &Entry : BroadcastFoldTable2)
+ for (const X86MemoryFoldTableEntry &Entry : BroadcastTable2)
// Index 2, folded broadcast
addTableEntry(Entry, TB_INDEX_2 | TB_FOLDED_LOAD | TB_FOLDED_BCAST);
- for (const X86MemoryFoldTableEntry &Entry : BroadcastFoldTable3)
+ for (const X86MemoryFoldTableEntry &Entry : BroadcastTable3)
// Index 3, folded broadcast
addTableEntry(Entry, TB_INDEX_3 | TB_FOLDED_LOAD | TB_FOLDED_BCAST);
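addTableEntry is not shown in this diff. Given that X86MemUnfoldTable maps
memory forms back to register forms, it presumably flips each entry's key
and destination and ORs in the extra flags; a sketch under that assumption
(written as a free function, not the real member):

#include <vector>

// Invert a fold entry so the memory-form opcode becomes the unfold key.
static void addUnfoldEntry(std::vector<X86MemoryFoldTableEntry> &Table,
                           const X86MemoryFoldTableEntry &Entry,
                           unsigned ExtraFlags) {
  Table.push_back({Entry.DstOp, Entry.KeyOp, Entry.Flags | ExtraFlags});
}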
@@ -516,7 +516,7 @@ struct X86MemBroadcastFoldTable {
X86MemBroadcastFoldTable() {
// Broadcast tables.
- for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastFoldTable2) {
+ for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastTable2) {
unsigned RegOp = Reg2Bcst.KeyOp;
unsigned BcstOp = Reg2Bcst.DstOp;
if (const X86MemoryFoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 2)) {
@@ -526,7 +526,7 @@ struct X86MemBroadcastFoldTable {
Table.push_back({MemOp, BcstOp, Flags});
}
}
- for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastSizeFoldTable2) {
+ for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastSizeTable2) {
unsigned RegOp = Reg2Bcst.KeyOp;
unsigned BcstOp = Reg2Bcst.DstOp;
if (const X86MemoryFoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 2)) {
@@ -537,7 +537,7 @@ struct X86MemBroadcastFoldTable {
}
}
- for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastFoldTable3) {
+ for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastTable3) {
unsigned RegOp = Reg2Bcst.KeyOp;
unsigned BcstOp = Reg2Bcst.DstOp;
if (const X86MemoryFoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 3)) {
@@ -547,7 +547,7 @@ struct X86MemBroadcastFoldTable {
Table.push_back({MemOp, BcstOp, Flags});
}
}
- for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastSizeFoldTable3) {
+ for (const X86MemoryFoldTableEntry &Reg2Bcst : BroadcastSizeTable3) {
unsigned RegOp = Reg2Bcst.KeyOp;
unsigned BcstOp = Reg2Bcst.DstOp;
if (const X86MemoryFoldTableEntry *Reg2Mem = lookupFoldTable(RegOp, 3)) {
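Keeping every table sorted by KeyOp is what lets lookupFoldTableImpl
binary-search them. Its body is only partially visible in this diff; a
sketch of such a lookup under the same assumptions as above, using plain
pointers where the real code takes an llvm::ArrayRef:

#include <algorithm>

// Find the entry whose key matches RegOp in a sorted table, or nullptr
// if the opcode has no memory-folded form in that table.
static const X86MemoryFoldTableEntry *
lookupSorted(const X86MemoryFoldTableEntry *Begin,
             const X86MemoryFoldTableEntry *End, unsigned RegOp) {
  const X86MemoryFoldTableEntry *I =
      std::lower_bound(Begin, End, RegOp,
                       [](const X86MemoryFoldTableEntry &E, unsigned Op) {
                         return E.KeyOp < Op;
                       });
  return (I != End && I->KeyOp == RegOp) ? I : nullptr;
}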
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 498469a55c4f043..8bb4e61cd911bd1 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -1,4 +1,4 @@
-static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
+static const X86MemoryFoldTableEntry Table2Addr[] = {
{X86::ADD16ri_DB, X86::ADD16mi, TB_NO_REVERSE},
{X86::ADD16rr_DB, X86::ADD16mr, TB_NO_REVERSE},
{X86::ADD32ri_DB, X86::ADD32mi, TB_NO_REVERSE},
@@ -214,7 +214,7 @@ static const X86MemoryFoldTableEntry MemoryFoldTable2Addr[] = {
{X86::XOR8rr, X86::XOR8mr, TB_NO_REVERSE},
};
-static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
+static const X86MemoryFoldTableEntry Table0[] = {
{X86::BT16ri8, X86::BT16mi8, TB_FOLDED_LOAD},
{X86::BT32ri8, X86::BT32mi8, TB_FOLDED_LOAD},
{X86::BT64ri8, X86::BT64mi8, TB_FOLDED_LOAD},
@@ -407,7 +407,7 @@ static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
{X86::VPMOVWBZrr, X86::VPMOVWBZmr, TB_FOLDED_STORE},
};
-static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
+static const X86MemoryFoldTableEntry Table1[] = {
{X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16},
{X86::AESKEYGENASSIST128rr, X86::AESKEYGENASSIST128rm, TB_ALIGN_16},
{X86::BEXTR32rr, X86::BEXTR32rm, 0},
@@ -1294,7 +1294,7 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{X86::VUCOMISSrr_Int, X86::VUCOMISSrm_Int, TB_NO_REVERSE},
};
-static const X86MemoryFoldTableEntry MemoryFoldTable2[] = {
+static const X86MemoryFoldTableEntry Table2[] = {
{X86::ADD16rr_DB, X86::ADD16rm, TB_NO_REVERSE},
{X86::ADD32rr_DB, X86::ADD32rm, TB_NO_REVERSE},
{X86::ADD64rr_DB, X86::ADD64rm, TB_NO_REVERSE},
@@ -3251,7 +3251,7 @@ static const X86MemoryFoldTableEntry MemoryFoldTable2[] = {
{X86::XORPSrr, X86::XORPSrm, TB_ALIGN_16},
};
-static const X86MemoryFoldTableEntry MemoryFoldTable3[] = {
+static const X86MemoryFoldTableEntry Table3[] = {
{X86::VADDPDZ128rrkz, X86::VADDPDZ128rmkz, 0},
{X86::VADDPDZ256rrkz, X86::VADDPDZ256rmkz, 0},
{X86::VADDPDZrrkz, X86::VADDPDZrmkz, 0},
@@ -4861,7 +4861,7 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = {
{X86::VXORPSZrrkz, X86::VXORPSZrmkz, 0},
};
-static const X86MemoryFoldTableEntry MemoryFoldTable4[] = {
+static const X86MemoryFoldTableEntry Table4[] = {
{X86::VADDPDZ128rrk, X86::VADDPDZ128rmk, 0},
{X86::VADDPDZ256rrk, X86::VADDPDZ256rmk, 0},
{X86::VADDPDZrrk, X86::VADDPDZrmk, 0},
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 7eb812c71f75005..404a149040e018f 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -36,16 +36,16 @@ const char *ExplicitAlign[] = {"MOVDQA", "MOVAPS", "MOVAPD", "MOVNTPS",
"MOVNTPD", "MOVNTDQ", "MOVNTDQA"};
// List of instructions NOT requiring explicit memory alignment.
-const char *ExplicitUnalign[] = {"MOVDQU", "MOVUPS", "MOVUPD",
- "PCMPESTRM", "PCMPESTRI",
- "PCMPISTRM", "PCMPISTRI" };
+const char *ExplicitUnalign[] = {"MOVDQU", "MOVUPS", "MOVUPD",
+ "PCMPESTRM", "PCMPESTRI", "PCMPISTRM",
+ "PCMPISTRI"};
const ManualMapEntry ManualMapSet[] = {
#define ENTRY(REG, MEM, FLAGS) {#REG, #MEM, FLAGS},
#include "X86ManualFoldTables.def"
};
-const std::set<StringRef> NoFoldSet= {
+const std::set<StringRef> NoFoldSet = {
#define NOFOLD(INSN) #INSN,
#include "X86ManualFoldTables.def"
};
@@ -86,7 +86,7 @@ class X86FoldTablesEmitter {
void print(formatted_raw_ostream &OS) const {
OS.indent(2);
OS << "{X86::" << RegInst->TheDef->getName() << ", ";
- OS << "X86::" << MemInst->TheDef->getName() << ", ";
+ OS << "X86::" << MemInst->TheDef->getName() << ", ";
std::string Attrs;
if (FoldLoad)
@@ -124,9 +124,10 @@ class X86FoldTablesEmitter {
#endif
};
- // NOTE: We check the fold tables are sorted in X86InstrFoldTables.cpp by the enum of the
- // instruction, which is computed in CodeGenTarget::ComputeInstrsByEnum. So we should
- // use the same comparator here.
+ // NOTE: We check the fold tables are sorted in X86InstrFoldTables.cpp by the
+ // enum of the instruction, which is computed in
+ // CodeGenTarget::ComputeInstrsByEnum. So we should use the same comparator
+ // here.
// FIXME: Could we share the code with CodeGenTarget::ComputeInstrsByEnum?
struct CompareInstrsByEnum {
bool operator()(const CodeGenInstruction *LHS,
@@ -162,22 +163,21 @@ class X86FoldTablesEmitter {
private:
// Decides to which table to add the entry with the given instructions.
// S sets the strategy of adding the TB_NO_REVERSE flag.
- void updateTables(const CodeGenInstruction *RegInstr,
- const CodeGenInstruction *MemInstr, uint16_t S = 0,
+ void updateTables(const CodeGenInstruction *RegInst,
+ const CodeGenInstruction *MemInst, uint16_t S = 0,
bool IsManual = false);
// Generates X86FoldTableEntry with the given instructions and fill it with
// the appropriate flags - then adds it to Table.
- void addEntryWithFlags(FoldTable &Table, const CodeGenInstruction *RegInstr,
- const CodeGenInstruction *MemInstr, uint16_t S,
+ void addEntryWithFlags(FoldTable &Table, const CodeGenInstruction *RegInst,
+ const CodeGenInstruction *MemInst, uint16_t S,
unsigned FoldedIdx, bool isManual);
// Print the given table as a static const C++ array of type
// X86MemoryFoldTableEntry.
void printTable(const FoldTable &Table, StringRef TableName,
formatted_raw_ostream &OS) {
- OS << "static const X86MemoryFoldTableEntry MemoryFold" << TableName
- << "[] = {\n";
+ OS << "static const X86MemoryFoldTableEntry " << TableName << "[] = {\n";
for (auto &E : Table)
E.second.print(OS);
@@ -388,14 +388,14 @@ class IsMatch {
} // end anonymous namespace
void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
- const CodeGenInstruction *RegInstr,
- const CodeGenInstruction *MemInstr,
+ const CodeGenInstruction *RegInst,
+ const CodeGenInstruction *MemInst,
uint16_t S, unsigned FoldedIdx,
bool isManual) {
- X86FoldTableEntry Result = X86FoldTableEntry(RegInstr, MemInstr);
- Record *RegRec = RegInstr->TheDef;
- Record *MemRec = MemInstr->TheDef;
+ X86FoldTableEntry Result = X86FoldTableEntry(RegInst, MemInst);
+ Record *RegRec = RegInst->TheDef;
+ Record *MemRec = MemInst->TheDef;
Result.NoReverse = S & TB_NO_REVERSE;
Result.NoForward = S & TB_NO_FORWARD;
@@ -403,7 +403,7 @@ void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
Result.FoldStore = S & TB_FOLDED_STORE;
Result.Alignment = Align(1ULL << ((S & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT));
if (isManual) {
- Table[RegInstr] = Result;
+ Table[RegInst] = Result;
return;
}
@@ -422,8 +422,8 @@ void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
Result.FoldStore = true;
}
- Record *RegOpRec = RegInstr->Operands[FoldedIdx].Rec;
- Record *MemOpRec = MemInstr->Operands[FoldedIdx].Rec;
+ Record *RegOpRec = RegInst->Operands[FoldedIdx].Rec;
+ Record *MemOpRec = MemInst->Operands[FoldedIdx].Rec;
// Unfolding code generates a load/store instruction according to the size of
// the register in the register form instruction.
@@ -443,18 +443,18 @@ void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
Record *BaseDef =
DropLen ? Records.getDef(RegInstName.drop_back(DropLen)) : nullptr;
bool IsMoveReg =
- BaseDef ? Target.getInstruction(BaseDef).isMoveReg : RegInstr->isMoveReg;
+ BaseDef ? Target.getInstruction(BaseDef).isMoveReg : RegInst->isMoveReg;
// A masked load can not be unfolded to a full load, otherwise it would access
// unexpected memory. A simple store can not be unfolded.
if (IsMoveReg && (BaseDef || Result.FoldStore))
Result.NoReverse = true;
uint8_t Enc = byteFromBitsInit(RegRec->getValueAsBitsInit("OpEncBits"));
- if (isExplicitAlign(RegInstr)) {
+ if (isExplicitAlign(RegInst)) {
// The instruction require explicitly aligned memory.
BitsInit *VectSize = RegRec->getValueAsBitsInit("VectSize");
Result.Alignment = Align(byteFromBitsInit(VectSize));
- } else if (!Enc && !isExplicitUnalign(RegInstr) &&
+ } else if (!Enc && !isExplicitUnalign(RegInst) &&
getMemOperandSize(MemOpRec) > 64) {
// Instructions with XOP/VEX/EVEX encoding do not require alignment while
// SSE packed vector instructions require a 16 byte alignment.
@@ -467,15 +467,15 @@ void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
if (RegRec->getName().contains("EXPAND"))
Result.NoReverse = true;
- Table[RegInstr] = Result;
+ Table[RegInst] = Result;
}
-void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
- const CodeGenInstruction *MemInstr,
+void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInst,
+ const CodeGenInstruction *MemInst,
uint16_t S, bool IsManual) {
- Record *RegRec = RegInstr->TheDef;
- Record *MemRec = MemInstr->TheDef;
+ Record *RegRec = RegInst->TheDef;
+ Record *MemRec = MemInst->TheDef;
unsigned MemOutSize = MemRec->getValueAsDag("OutOperandList")->getNumArgs();
unsigned RegOutSize = RegRec->getValueAsDag("OutOperandList")->getNumArgs();
unsigned MemInSize = MemRec->getValueAsDag("InOperandList")->getNumArgs();
@@ -484,7 +484,7 @@ void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
// Instructions which Read-Modify-Write should be added to Table2Addr.
if (!MemOutSize && RegOutSize == 1 && MemInSize == RegInSize) {
// X86 would not unfold Read-Modify-Write instructions so add TB_NO_REVERSE.
- addEntryWithFlags(Table2Addr, RegInstr, MemInstr, S | TB_NO_REVERSE, 0,
+ addEntryWithFlags(Table2Addr, RegInst, MemInst, S | TB_NO_REVERSE, 0,
IsManual);
return;
}
@@ -493,28 +493,29 @@ void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
// Load-Folding cases.
// If the i'th register form operand is a register and the i'th memory form
// operand is a memory operand, add instructions to Table#i.
- for (unsigned i = RegOutSize, e = RegInstr->Operands.size(); i < e; i++) {
- Record *RegOpRec = RegInstr->Operands[i].Rec;
- Record *MemOpRec = MemInstr->Operands[i].Rec;
- // PointerLikeRegClass: For instructions like TAILJMPr, TAILJMPr64, TAILJMPr64_REX
+ for (unsigned I = RegOutSize, E = RegInst->Operands.size(); I < E; I++) {
+ Record *RegOpRec = RegInst->Operands[I].Rec;
+ Record *MemOpRec = MemInst->Operands[I].Rec;
+ // PointerLikeRegClass: For instructions like TAILJMPr, TAILJMPr64,
+ // TAILJMPr64_REX
if ((isRegisterOperand(RegOpRec) ||
RegOpRec->isSubClassOf("PointerLikeRegClass")) &&
isMemoryOperand(MemOpRec)) {
- switch (i) {
+ switch (I) {
case 0:
- addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0, IsManual);
+ addEntryWithFlags(Table0, RegInst, MemInst, S, 0, IsManual);
return;
case 1:
- addEntryWithFlags(Table1, RegInstr, MemInstr, S, 1, IsManual);
+ addEntryWithFlags(Table1, RegInst, MemInst, S, 1, IsManual);
return;
case 2:
- addEntryWithFlags(Table2, RegInstr, MemInstr, S, 2, IsManual);
+ addEntryWithFlags(Table2, RegInst, MemInst, S, 2, IsManual);
return;
case 3:
- addEntryWithFlags(Table3, RegInstr, MemInstr, S, 3, IsManual);
+ addEntryWithFlags(Table3, RegInst, MemInst, S, 3, IsManual);
return;
case 4:
- addEntryWithFlags(Table4, RegInstr, MemInstr, S, 4, IsManual);
+ addEntryWithFlags(Table4, RegInst, MemInst, S, 4, IsManual);
return;
}
}
@@ -527,16 +528,16 @@ void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
// For example:
// MOVAPSrr => (outs VR128:$dst), (ins VR128:$src)
// MOVAPSmr => (outs), (ins f128mem:$dst, VR128:$src)
- Record *RegOpRec = RegInstr->Operands[RegOutSize - 1].Rec;
- Record *MemOpRec = MemInstr->Operands[RegOutSize - 1].Rec;
+ Record *RegOpRec = RegInst->Operands[RegOutSize - 1].Rec;
+ Record *MemOpRec = MemInst->Operands[RegOutSize - 1].Rec;
if (isRegisterOperand(RegOpRec) && isMemoryOperand(MemOpRec) &&
getRegOperandSize(RegOpRec) == getMemOperandSize(MemOpRec))
- addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0, IsManual);
+ addEntryWithFlags(Table0, RegInst, MemInst, S, 0, IsManual);
}
}
-void X86FoldTablesEmitter::run(raw_ostream &o) {
- formatted_raw_ostream OS(o);
+void X86FoldTablesEmitter::run(raw_ostream &O) {
+ formatted_raw_ostream OS(O);
// Holds all memory instructions
std::vector<const CodeGenInstruction *> MemInsts;
@@ -590,7 +591,6 @@ void X86FoldTablesEmitter::run(raw_ostream &o) {
continue;
// Two forms (memory & register) of the same instruction must have the same
- // opcode. try matching only with register form instructions with the same
// opcode.
std::vector<const CodeGenInstruction *> &OpcRegInsts = RegInstsIt->second;
@@ -598,11 +598,10 @@ void X86FoldTablesEmitter::run(raw_ostream &o) {
if (Match != OpcRegInsts.end()) {
const CodeGenInstruction *RegInst = *Match;
StringRef RegInstName = RegInst->TheDef->getName();
- if (RegInstName.ends_with("_REV") || RegInstName.ends_with("_alt")) {
- if (auto *RegAltRec = Records.getDef(RegInstName.drop_back(4))) {
+ if (RegInstName.ends_with("_REV") || RegInstName.ends_with("_alt"))
+ if (auto *RegAltRec = Records.getDef(RegInstName.drop_back(4)))
RegInst = &Target.getInstruction(RegAltRec);
- }
- }
+
updateTables(RegInst, MemInst);
OpcRegInsts.erase(Match);
}
@@ -631,13 +630,14 @@ void X86FoldTablesEmitter::run(raw_ostream &o) {
CheckMemFoldTable(Table3);
CheckMemFoldTable(Table4);
#endif
+#define PRINT_TABLE(TABLE) printTable(TABLE, #TABLE, OS);
// Print all tables.
- printTable(Table2Addr, "Table2Addr", OS);
- printTable(Table0, "Table0", OS);
- printTable(Table1, "Table1", OS);
- printTable(Table2, "Table2", OS);
- printTable(Table3, "Table3", OS);
- printTable(Table4, "Table4", OS);
+ PRINT_TABLE(Table2Addr)
+ PRINT_TABLE(Table0)
+ PRINT_TABLE(Table1)
+ PRINT_TABLE(Table2)
+ PRINT_TABLE(Table3)
+ PRINT_TABLE(Table4)
}
static TableGen::Emitter::OptClass<X86FoldTablesEmitter>
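The new PRINT_TABLE macro relies on preprocessor stringification: #TABLE
turns the macro argument into a string literal, so the variable's
identifier doubles as the emitted array name. That is also why change 1
renames the in-memory tables to match the emitted names. A self-contained
illustration of the technique (the printTable stand-in below is
simplified, not the emitter's real method):

#include <cstdio>

static void printTable(int /*Table*/, const char *TableName) {
  // The real emitter streams the table's entries; here we only show the
  // header line that uses the stringized name.
  std::printf("static const X86MemoryFoldTableEntry %s[] = {\n", TableName);
}

#define PRINT_TABLE(TABLE) printTable(TABLE, #TABLE);

int main() {
  int Table2Addr = 0, Table0 = 0;
  PRINT_TABLE(Table2Addr) // prints "... Table2Addr[] = {"
  PRINT_TABLE(Table0)     // prints "... Table0[] = {"
}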