[llvm] d10e47d - [X86][mem-fold] Refine code, NFCI
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 4 05:45:19 PDT 2023
Author: Shengchen Kan
Date: 2023-04-04T20:45:11+08:00
New Revision: d10e47d732163a6cbc698fe002fa597aebab78f2
URL: https://github.com/llvm/llvm-project/commit/d10e47d732163a6cbc698fe002fa597aebab78f2
DIFF: https://github.com/llvm/llvm-project/commit/d10e47d732163a6cbc698fe002fa597aebab78f2.diff
LOG: [X86][mem-fold] Refine code, NFCI
1. Remove the redundant constructor definition
2. Move the array from the .inc file into a .def file (see the sketch after this list)
3. Add a license header to the .def file
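For readers unfamiliar with the pattern referenced in item 2: the .def file is an X-macro list. Each record is spelled ENTRY(REG, MEM, FLAGS), and whoever includes the file defines ENTRY to expand those records however it needs; here the emitter expands them into aggregate initializers for ManualMapEntry, which is also why the user-declared constructor removed in item 1 is no longer required. Below is a minimal, self-contained sketch of the idea rather than the actual LLVM code: the MyEntry struct, the DEMO_* flags, and the inline DEMO_ENTRIES list are hypothetical stand-ins for ManualMapEntry, the TB_* flags, and X86ManualFoldTables.def.

#include <cstdint>
#include <cstdio>

// Stand-in for ManualMapEntry. It is a plain aggregate, so brace
// initialization works without a user-declared constructor.
struct MyEntry {
  const char *RegInstStr;
  const char *MemInstStr;
  uint16_t Strategy;
};

// Illustrative flag values only; the real TB_* flags are defined in LLVM.
constexpr uint16_t DEMO_TB_NO_REVERSE = 1 << 0;
constexpr uint16_t DEMO_TB_FOLDED_STORE = 1 << 1;

// In the real patch these records live in X86ManualFoldTables.def and are
// pulled in with #include; a macro keeps this sketch self-contained.
#define DEMO_ENTRIES                                                           \
  ENTRY(ADD16rr_DB, ADD16mr, DEMO_TB_NO_REVERSE)                               \
  ENTRY(MOVSS2DIrr, MOVSSmr, DEMO_TB_FOLDED_STORE)

// The includer defines ENTRY, expands the list, then undefines it again,
// turning each record into one aggregate initializer.
const MyEntry ManualMapSet[] = {
#define ENTRY(REG, MEM, FLAGS) {#REG, #MEM, FLAGS},
    DEMO_ENTRIES
#undef ENTRY
};

int main() {
  for (const MyEntry &E : ManualMapSet)
    std::printf("%s -> %s (flags 0x%x)\n", E.RegInstStr, E.MemInstStr,
                (unsigned)E.Strategy);
  return 0;
}

A .def list like this can be re-expanded with a different ENTRY definition by any other includer (for example to emit a switch or documentation), which is the usual reason for preferring it over an .inc file that hard-codes a single array.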
Added:
llvm/utils/TableGen/X86ManualFoldTables.def
Modified:
llvm/utils/TableGen/X86FoldTablesEmitter.cpp
Removed:
llvm/utils/TableGen/X86FoldTablesEmitterManualMapSet.inc
################################################################################
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index c26c8bff347f6..ea9e06b15405e 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -29,10 +29,6 @@ struct ManualMapEntry {
const char *RegInstStr;
const char *MemInstStr;
uint16_t Strategy;
-
- ManualMapEntry(const char *RegInstStr, const char *MemInstStr,
- uint16_t Strategy = 0)
- : RegInstStr(RegInstStr), MemInstStr(MemInstStr), Strategy(Strategy) {}
};
// List of instructions requiring explicitly aligned memory.
@@ -44,7 +40,12 @@ const char *ExplicitUnalign[] = {"MOVDQU", "MOVUPS", "MOVUPD",
"PCMPESTRM", "PCMPESTRI",
"PCMPISTRM", "PCMPISTRI" };
-#include "X86FoldTablesEmitterManualMapSet.inc"
+const ManualMapEntry ManualMapSet[] = {
+#define ENTRY(REG, MEM, FLAGS) {#REG, #MEM, FLAGS},
+#include "X86ManualFoldTables.def"
+#undef ENTRY
+};
+
static bool isExplicitAlign(const CodeGenInstruction *Inst) {
return any_of(ExplicitAlign, [Inst](const char *InstStr) {
return Inst->TheDef->getName().contains(InstStr);
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitterManualMapSet.inc b/llvm/utils/TableGen/X86FoldTablesEmitterManualMapSet.inc
deleted file mode 100644
index 8b7f3878f6f59..0000000000000
--- a/llvm/utils/TableGen/X86FoldTablesEmitterManualMapSet.inc
+++ /dev/null
@@ -1,83 +0,0 @@
-const ManualMapEntry ManualMapSet[] = {
- // Part1: These following records are for manually mapping instructions that
- // do not match by their encoding.
- { "ADD16ri_DB", "ADD16mi", TB_NO_REVERSE },
- { "ADD16ri8_DB", "ADD16mi8", TB_NO_REVERSE },
- { "ADD16rr_DB", "ADD16mr", TB_NO_REVERSE },
- { "ADD32ri_DB", "ADD32mi", TB_NO_REVERSE },
- { "ADD32ri8_DB", "ADD32mi8", TB_NO_REVERSE },
- { "ADD32rr_DB", "ADD32mr", TB_NO_REVERSE },
- { "ADD64ri32_DB", "ADD64mi32", TB_NO_REVERSE },
- { "ADD64ri8_DB", "ADD64mi8", TB_NO_REVERSE },
- { "ADD64rr_DB", "ADD64mr", TB_NO_REVERSE },
- { "ADD8ri_DB", "ADD8mi", TB_NO_REVERSE },
- { "ADD8rr_DB", "ADD8mr", TB_NO_REVERSE },
- { "ADD16rr_DB", "ADD16rm", TB_NO_REVERSE },
- { "ADD32rr_DB", "ADD32rm", TB_NO_REVERSE },
- { "ADD64rr_DB", "ADD64rm", TB_NO_REVERSE },
- { "ADD8rr_DB", "ADD8rm", TB_NO_REVERSE },
- { "MMX_MOVD64from64rr", "MMX_MOVQ64mr", TB_FOLDED_STORE },
- { "MMX_MOVD64grr", "MMX_MOVD64mr", TB_FOLDED_STORE },
- { "MOV64toSDrr", "MOV64mr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "MOVDI2SSrr", "MOV32mr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "MOVPQIto64rr", "MOVPQI2QImr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "MOVSDto64rr", "MOVSDmr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "MOVSS2DIrr", "MOVSSmr", TB_FOLDED_STORE },
- { "MOVLHPSrr", "MOVHPSrm", TB_NO_REVERSE },
- { "PUSH16r", "PUSH16rmm", TB_FOLDED_LOAD },
- { "PUSH32r", "PUSH32rmm", TB_FOLDED_LOAD },
- { "PUSH64r", "PUSH64rmm", TB_FOLDED_LOAD },
- { "TAILJMPr", "TAILJMPm", TB_FOLDED_LOAD },
- { "TAILJMPr64", "TAILJMPm64", TB_FOLDED_LOAD },
- { "TAILJMPr64_REX", "TAILJMPm64_REX", TB_FOLDED_LOAD },
- { "TCRETURNri", "TCRETURNmi", TB_FOLDED_LOAD | TB_NO_FORWARD },
- { "TCRETURNri64", "TCRETURNmi64", TB_FOLDED_LOAD | TB_NO_FORWARD },
- { "VMOVLHPSZrr", "VMOVHPSZ128rm", TB_NO_REVERSE },
- { "VMOVLHPSrr", "VMOVHPSrm", TB_NO_REVERSE },
- { "VMOV64toSDZrr", "MOV64mr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOV64toSDrr", "MOV64mr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOVDI2SSZrr", "MOV32mr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOVDI2SSrr", "MOV32mr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOVPQIto64Zrr", "VMOVPQI2QIZmr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOVPQIto64rr", "VMOVPQI2QImr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOVSDto64Zrr", "VMOVSDZmr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOVSDto64rr", "VMOVSDmr", TB_FOLDED_STORE | TB_NO_REVERSE },
- { "VMOVSS2DIZrr", "VMOVSSZmr", TB_FOLDED_STORE },
- { "VMOVSS2DIrr", "VMOVSSmr", TB_FOLDED_STORE },
- { "MMX_MOVD64to64rr", "MMX_MOVQ64rm", 0 },
- { "MOV64toPQIrr", "MOVQI2PQIrm", TB_NO_REVERSE },
- { "MOV64toSDrr", "MOVSDrm_alt", TB_NO_REVERSE },
- { "MOVDI2SSrr", "MOVSSrm_alt", 0 },
- { "VMOV64toPQIZrr", "VMOVQI2PQIZrm", TB_NO_REVERSE },
- { "VMOV64toPQIrr", "VMOVQI2PQIrm", TB_NO_REVERSE },
- { "VMOV64toSDZrr", "VMOVSDZrm_alt", TB_NO_REVERSE },
- { "VMOV64toSDrr", "VMOVSDrm_alt", TB_NO_REVERSE },
- { "VMOVDI2SSZrr", "VMOVSSZrm_alt", 0 },
- { "VMOVDI2SSrr", "VMOVSSrm_alt", 0 },
- { "MOVSDrr", "MOVLPDrm", TB_NO_REVERSE },
- { "VMOVSDZrr", "VMOVLPDZ128rm", TB_NO_REVERSE },
- { "VMOVSDrr", "VMOVLPDrm", TB_NO_REVERSE },
-
- // Part2: These following records are for manually mapping instructions that
- // have same opcode.
- // INSERTPSrm has no count_s while INSERTPSrr has count_s.
- // count_s is to indicate which element in dst vector is inserted.
- // if count_s!=0, we can't fold INSERTPSrr into INSERTPSrm
- //
- // the following folding can happen when count_s==0
- // load xmm0, m32
- // insertpsrr xmm1, xmm0, imm
- // =>
- // insertpsrm xmm1, m32, imm
- { "INSERTPSrr", "INSERTPSrm", TB_NO_REVERSE | TB_NO_FORWARD },
- { "UD1Lr", "UD1Lm", TB_NO_REVERSE | TB_NO_FORWARD },
- { "UD1Qr", "UD1Qm", TB_NO_REVERSE | TB_NO_FORWARD },
- { "UD1Wr", "UD1Wm", TB_NO_REVERSE | TB_NO_FORWARD },
- // Remove {"MMX_MOVQ64rr", "MMX_MOVQ64mr"} since it will create duplicate in
- // unfolding table due to the {"MMX_MOVD64from64rr", "MMX_MOVQ64mr"}
- { "MMX_MOVQ64rr", "MMX_MOVQ64mr", TB_NO_FORWARD | TB_NO_REVERSE },
- // Remove {"MMX_MOVQ64rr", "MMX_MOVQ64rm"} since it will create duplicate in
- // unfolding table due to the {"MMX_MOVD64from64rr", "MMX_MOVQ64rm"}
- { "MMX_MOVQ64rr", "MMX_MOVQ64rm", TB_NO_FORWARD | TB_NO_REVERSE },
-};
-
diff --git a/llvm/utils/TableGen/X86ManualFoldTables.def b/llvm/utils/TableGen/X86ManualFoldTables.def
new file mode 100644
index 0000000000000..ba027091c5203
--- /dev/null
+++ b/llvm/utils/TableGen/X86ManualFoldTables.def
@@ -0,0 +1,95 @@
+//===- X86ManualFoldTables.def ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// \file
+// This file defines all the entries in X86 memory folding tables that need
+// special handling.
+//===----------------------------------------------------------------------===//
+
+#ifndef ENTRY
+#define ENTRY(REG, MEM, FLAGS)
+#endif
+
+// Part1: The following records are for manually mapping instructions that
+// do not match by their encoding.
+ENTRY(ADD16ri_DB, ADD16mi, TB_NO_REVERSE)
+ENTRY(ADD16ri8_DB, ADD16mi8, TB_NO_REVERSE)
+ENTRY(ADD16rr_DB, ADD16mr, TB_NO_REVERSE)
+ENTRY(ADD32ri_DB, ADD32mi, TB_NO_REVERSE)
+ENTRY(ADD32ri8_DB, ADD32mi8, TB_NO_REVERSE)
+ENTRY(ADD32rr_DB, ADD32mr, TB_NO_REVERSE)
+ENTRY(ADD64ri32_DB, ADD64mi32, TB_NO_REVERSE)
+ENTRY(ADD64ri8_DB, ADD64mi8, TB_NO_REVERSE)
+ENTRY(ADD64rr_DB, ADD64mr, TB_NO_REVERSE)
+ENTRY(ADD8ri_DB, ADD8mi, TB_NO_REVERSE)
+ENTRY(ADD8rr_DB, ADD8mr, TB_NO_REVERSE)
+ENTRY(ADD16rr_DB, ADD16rm, TB_NO_REVERSE)
+ENTRY(ADD32rr_DB, ADD32rm, TB_NO_REVERSE)
+ENTRY(ADD64rr_DB, ADD64rm, TB_NO_REVERSE)
+ENTRY(ADD8rr_DB, ADD8rm, TB_NO_REVERSE)
+ENTRY(MMX_MOVD64from64rr, MMX_MOVQ64mr, TB_FOLDED_STORE)
+ENTRY(MMX_MOVD64grr, MMX_MOVD64mr, TB_FOLDED_STORE)
+ENTRY(MOV64toSDrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(MOVDI2SSrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(MOVPQIto64rr, MOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(MOVSDto64rr, MOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(MOVSS2DIrr, MOVSSmr, TB_FOLDED_STORE)
+ENTRY(MOVLHPSrr, MOVHPSrm, TB_NO_REVERSE)
+ENTRY(PUSH16r, PUSH16rmm, TB_FOLDED_LOAD)
+ENTRY(PUSH32r, PUSH32rmm, TB_FOLDED_LOAD)
+ENTRY(PUSH64r, PUSH64rmm, TB_FOLDED_LOAD)
+ENTRY(TAILJMPr, TAILJMPm, TB_FOLDED_LOAD)
+ENTRY(TAILJMPr64, TAILJMPm64, TB_FOLDED_LOAD)
+ENTRY(TAILJMPr64_REX, TAILJMPm64_REX, TB_FOLDED_LOAD)
+ENTRY(TCRETURNri, TCRETURNmi, TB_FOLDED_LOAD | TB_NO_FORWARD)
+ENTRY(TCRETURNri64, TCRETURNmi64, TB_FOLDED_LOAD | TB_NO_FORWARD)
+ENTRY(VMOVLHPSZrr, VMOVHPSZ128rm, TB_NO_REVERSE)
+ENTRY(VMOVLHPSrr, VMOVHPSrm, TB_NO_REVERSE)
+ENTRY(VMOV64toSDZrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOV64toSDrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOVDI2SSZrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOVDI2SSrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOVPQIto64Zrr, VMOVPQI2QIZmr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOVPQIto64rr, VMOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOVSDto64Zrr, VMOVSDZmr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOVSDto64rr, VMOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE)
+ENTRY(VMOVSS2DIZrr, VMOVSSZmr, TB_FOLDED_STORE)
+ENTRY(VMOVSS2DIrr, VMOVSSmr, TB_FOLDED_STORE)
+ENTRY(MMX_MOVD64to64rr, MMX_MOVQ64rm, 0)
+ENTRY(MOV64toPQIrr, MOVQI2PQIrm, TB_NO_REVERSE)
+ENTRY(MOV64toSDrr, MOVSDrm_alt, TB_NO_REVERSE)
+ENTRY(MOVDI2SSrr, MOVSSrm_alt, 0)
+ENTRY(VMOV64toPQIZrr, VMOVQI2PQIZrm, TB_NO_REVERSE)
+ENTRY(VMOV64toPQIrr, VMOVQI2PQIrm, TB_NO_REVERSE)
+ENTRY(VMOV64toSDZrr, VMOVSDZrm_alt, TB_NO_REVERSE)
+ENTRY(VMOV64toSDrr, VMOVSDrm_alt, TB_NO_REVERSE)
+ENTRY(VMOVDI2SSZrr, VMOVSSZrm_alt, 0)
+ENTRY(VMOVDI2SSrr, VMOVSSrm_alt, 0)
+ENTRY(MOVSDrr, MOVLPDrm, TB_NO_REVERSE)
+ENTRY(VMOVSDZrr, VMOVLPDZ128rm, TB_NO_REVERSE)
+ENTRY(VMOVSDrr, VMOVLPDrm, TB_NO_REVERSE)
+
+// Part2: The following records are for manually mapping instructions that
+// have the same opcode.
+//
+// INSERTPSrm has no count_s while INSERTPSrr does.
+// count_s indicates which element in the dst vector is inserted.
+// If count_s != 0, we can't fold INSERTPSrr into INSERTPSrm.
+//
+// The following folding can happen when count_s == 0:
+// load xmm0, m32
+// insertpsrr xmm1, xmm0, imm
+// =>
+// insertpsrm xmm1, m32, imm
+ENTRY(INSERTPSrr, INSERTPSrm, TB_NO_REVERSE | TB_NO_FORWARD)
+ENTRY(UD1Lr, UD1Lm, TB_NO_REVERSE | TB_NO_FORWARD)
+ENTRY(UD1Qr, UD1Qm, TB_NO_REVERSE | TB_NO_FORWARD)
+ENTRY(UD1Wr, UD1Wm, TB_NO_REVERSE | TB_NO_FORWARD)
+// Exclude this b/c it would conflict with {MMX_MOVD64from64rr, MMX_MOVQ64mr} in the unfolding table
+ENTRY(MMX_MOVQ64rr, MMX_MOVQ64mr, TB_NO_FORWARD | TB_NO_REVERSE)
+// Exclude this b/c it would conflict with {MMX_MOVD64from64rr, MMX_MOVQ64rm} in the unfolding table
+ENTRY(MMX_MOVQ64rr, MMX_MOVQ64rm, TB_NO_FORWARD | TB_NO_REVERSE)