[llvm] 1e75ce4 - [X86][mem-fold] Remove the logic for FoldGenData, NFCI
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 5 08:24:44 PDT 2023
Author: Shengchen Kan
Date: 2023-04-05T23:24:25+08:00
New Revision: 1e75ce4289d5c41ccbbe0b9877ed66aefd81a13c
URL: https://github.com/llvm/llvm-project/commit/1e75ce4289d5c41ccbbe0b9877ed66aefd81a13c
DIFF: https://github.com/llvm/llvm-project/commit/1e75ce4289d5c41ccbbe0b9877ed66aefd81a13c.diff
LOG: [X86][mem-fold] Remove the logic for FoldGenData, NFCI
Added:
Modified:
llvm/lib/Target/X86/X86InstrAVX512.td
llvm/lib/Target/X86/X86InstrArithmetic.td
llvm/lib/Target/X86/X86InstrFMA.td
llvm/lib/Target/X86/X86InstrFormats.td
llvm/lib/Target/X86/X86InstrMMX.td
llvm/lib/Target/X86/X86InstrMisc.td
llvm/lib/Target/X86/X86InstrSSE.td
llvm/lib/Target/X86/X86InstrXOP.td
llvm/utils/TableGen/X86FoldTablesEmitter.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 7ac7ef3a2a0d..d60247f9da39 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -3533,21 +3533,19 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, string BaseName,
def rr_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src),
OpcodeStr # "\t{$src, $dst|$dst, $src}",
[], _.ExeDomain>, EVEX,
- FoldGenData<BaseName#_.ZSuffix#rr>, Sched<[Sched.RR]>,
+ Sched<[Sched.RR]>,
EVEX2VEXOverride<EVEX2VEXOvrd#"rr_REV">;
def rrk_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src),
OpcodeStr # "\t{$src, ${dst} {${mask}}|"#
"${dst} {${mask}}, $src}",
[], _.ExeDomain>, EVEX, EVEX_K,
- FoldGenData<BaseName#_.ZSuffix#rrk>,
Sched<[Sched.RR]>;
def rrkz_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src),
OpcodeStr # "\t{$src, ${dst} {${mask}} {z}|" #
"${dst} {${mask}} {z}, $src}",
[], _.ExeDomain>, EVEX, EVEX_KZ,
- FoldGenData<BaseName#_.ZSuffix#rrkz>,
Sched<[Sched.RR]>;
}
@@ -4535,7 +4533,6 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
(ins VR128X:$src1, VR128X:$src2),
"vmovsh\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, T_MAP5XS, EVEX_4V, VEX_LIG,
- FoldGenData<"VMOVSHZrr">,
Sched<[SchedWriteFShuffle.XMM]>;
let Constraints = "$src0 = $dst" in
@@ -4545,7 +4542,6 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
"vmovsh\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
[]>, T_MAP5XS, EVEX_K, EVEX_4V, VEX_LIG,
- FoldGenData<"VMOVSHZrrk">,
Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSHZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
@@ -4553,14 +4549,12 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
"vmovsh\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
[]>, EVEX_KZ, T_MAP5XS, EVEX_4V, VEX_LIG,
- FoldGenData<"VMOVSHZrrkz">,
Sched<[SchedWriteFShuffle.XMM]>;
}
def VMOVSSZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, XS, EVEX_4V, VEX_LIG,
- FoldGenData<"VMOVSSZrr">,
Sched<[SchedWriteFShuffle.XMM]>;
let Constraints = "$src0 = $dst" in
@@ -4570,7 +4564,6 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
"vmovss\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
[]>, EVEX_K, XS, EVEX_4V, VEX_LIG,
- FoldGenData<"VMOVSSZrrk">,
Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSSZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
@@ -4578,14 +4571,12 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
"vmovss\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
[]>, EVEX_KZ, XS, EVEX_4V, VEX_LIG,
- FoldGenData<"VMOVSSZrrkz">,
Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSDZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovsd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[]>, XD, EVEX_4V, VEX_LIG, REX_W,
- FoldGenData<"VMOVSDZrr">,
Sched<[SchedWriteFShuffle.XMM]>;
let Constraints = "$src0 = $dst" in
@@ -4595,8 +4586,7 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
"vmovsd\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
[]>, EVEX_K, XD, EVEX_4V, VEX_LIG,
- REX_W, FoldGenData<"VMOVSDZrrk">,
- Sched<[SchedWriteFShuffle.XMM]>;
+ REX_W, Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSDZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins f64x_info.KRCWM:$mask, VR128X:$src1,
@@ -4604,8 +4594,7 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
"vmovsd\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
[]>, EVEX_KZ, XD, EVEX_4V, VEX_LIG,
- REX_W, FoldGenData<"VMOVSDZrrkz">,
- Sched<[SchedWriteFShuffle.XMM]>;
+ REX_W, Sched<[SchedWriteFShuffle.XMM]>;
}
def : InstAlias<"vmovsh.s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -11648,8 +11637,7 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
(ins _.RC:$src1, u8imm:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- EVEX, TAPD, FoldGenData<NAME#rr>,
- Sched<[WriteVecExtract]>;
+ EVEX, TAPD, Sched<[WriteVecExtract]>;
defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TAPD;
}
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
index cd07458d2de3..484af561fd8f 100644
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -881,10 +881,10 @@ multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
} // isConvertibleToThreeAddress
} // isCommutable
- def NAME#8rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi8>, FoldGenData<NAME#8rr>;
- def NAME#16rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi16>, FoldGenData<NAME#16rr>;
- def NAME#32rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi32>, FoldGenData<NAME#32rr>;
- def NAME#64rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi64>, FoldGenData<NAME#64rr>;
+ def NAME#8rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi8>;
+ def NAME#16rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi16>;
+ def NAME#32rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi32>;
+ def NAME#64rr_REV : BinOpRR_Rev<BaseOpc2, mnemonic, Xi64>;
def NAME#8rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi8 , opnodeflag>;
def NAME#16rm : BinOpRM_RF<BaseOpc2, mnemonic, Xi16, opnodeflag>;
@@ -968,10 +968,10 @@ multiclass ArithBinOp_RFF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
} // isConvertibleToThreeAddress
} // isCommutable
- def NAME#8rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi8>, FoldGenData<NAME#8rr>;
- def NAME#16rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi16>, FoldGenData<NAME#16rr>;
- def NAME#32rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi32>, FoldGenData<NAME#32rr>;
- def NAME#64rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi64>, FoldGenData<NAME#64rr>;
+ def NAME#8rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi8>;
+ def NAME#16rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi16>;
+ def NAME#32rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi32>;
+ def NAME#64rr_REV : BinOpRR_RFF_Rev<BaseOpc2, mnemonic, Xi64>;
def NAME#8rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi8 , opnode>;
def NAME#16rm : BinOpRM_RFF<BaseOpc2, mnemonic, Xi16, opnode>;
@@ -1050,10 +1050,10 @@ multiclass ArithBinOp_F<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
}
} // isCommutable
- def NAME#8rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi8>, FoldGenData<NAME#8rr>;
- def NAME#16rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi16>, FoldGenData<NAME#16rr>;
- def NAME#32rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi32>, FoldGenData<NAME#32rr>;
- def NAME#64rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi64>, FoldGenData<NAME#64rr>;
+ def NAME#8rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi8>;
+ def NAME#16rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi16>;
+ def NAME#32rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi32>;
+ def NAME#64rr_REV : BinOpRR_F_Rev<BaseOpc2, mnemonic, Xi64>;
def NAME#8rm : BinOpRM_F<BaseOpc2, mnemonic, Xi8 , opnode>;
def NAME#16rm : BinOpRM_F<BaseOpc2, mnemonic, Xi16, opnode>;
diff --git a/llvm/lib/Target/X86/X86InstrFMA.td b/llvm/lib/Target/X86/X86InstrFMA.td
index 06b937209948..03e1225ad9a0 100644
--- a/llvm/lib/Target/X86/X86InstrFMA.td
+++ b/llvm/lib/Target/X86/X86InstrFMA.td
@@ -423,7 +423,7 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
- VEX_LIG, FoldGenData<NAME#rr>, Sched<[sched]>;
+ VEX_LIG, Sched<[sched]>;
}
multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
@@ -458,7 +458,7 @@ let isCodeGenOnly = 1, hasSideEffects = 0,
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, VEX_LIG, FoldGenData<NAME#rr_Int>, Sched<[sched]>;
+ []>, VEX_LIG, Sched<[sched]>;
} // isCodeGenOnly = 1
}
@@ -527,12 +527,12 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
- Sched<[sched.XMM]>, FoldGenData<NAME#rr>;
+ Sched<[sched.XMM]>;
def Yrr_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR256:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
- VEX_L, Sched<[sched.YMM]>, FoldGenData<NAME#Yrr>;
+ VEX_L, Sched<[sched.YMM]>;
} // isCodeGenOnly = 1
}
diff --git a/llvm/lib/Target/X86/X86InstrFormats.td b/llvm/lib/Target/X86/X86InstrFormats.td
index 6d278d6f838d..331ca0a07fa4 100644
--- a/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/llvm/lib/Target/X86/X86InstrFormats.td
@@ -259,12 +259,6 @@ class EVEX_CD8<int esize, CD8VForm form> {
class XOP { Encoding OpEnc = EncXOP; }
class XOP_4V : XOP { bit hasVEX_4V = 1; }
-// Specify the alternative register form instruction to replace the current
-// instruction in case it was picked during generation of memory folding tables
-class FoldGenData<string _RegisterForm> {
- string FoldGenRegForm = _RegisterForm;
-}
-
// Provide a specific instruction to be used by the EVEX2VEX conversion.
class EVEX2VEXOverride<string VEXInstrName> {
string EVEX2VEXOverride = VEXInstrName;
@@ -352,10 +346,6 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
CD8_EltSize,
!srl(VectSize, CD8_Form{1-0}))), 0);
- // Used in the memory folding generation (TableGen backend) to point to an alternative
- // instruction to replace the current one in case it got picked during generation.
- string FoldGenRegForm = ?;
-
// Used to prevent an explicit EVEX2VEX override for this instruction.
string EVEX2VEXOverride = ?;
diff --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td
index aa5422554c9b..acf7605b3f53 100644
--- a/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/llvm/lib/Target/X86/X86InstrMMX.td
@@ -178,7 +178,7 @@ def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR64:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst,
(MMX_X86movd2w (x86mmx VR64:$src)))]>,
- Sched<[WriteVecMoveToGpr]>, FoldGenData<"MMX_MOVD64rr">;
+ Sched<[WriteVecMoveToGpr]>;
let isBitcast = 1 in
def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
diff --git a/llvm/lib/Target/X86/X86InstrMisc.td b/llvm/lib/Target/X86/X86InstrMisc.td
index c4baefd1ad53..f3ca5bc5d0b5 100644
--- a/llvm/lib/Target/X86/X86InstrMisc.td
+++ b/llvm/lib/Target/X86/X86InstrMisc.td
@@ -374,14 +374,11 @@ def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
// Longer forms that use a ModR/M byte. Needed for disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
- "mov{b}\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOV8ri">;
+ "mov{b}\t{$src, $dst|$dst, $src}", []>;
def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
- FoldGenData<"MOV16ri">;
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
- FoldGenData<"MOV32ri">;
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
}
} // SchedRW
@@ -523,17 +520,13 @@ def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
SchedRW = [WriteMove], isMoveReg = 1 in {
def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
- "mov{b}\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOV8rr">;
+ "mov{b}\t{$src, $dst|$dst, $src}", []>;
def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
- FoldGenData<"MOV16rr">;
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
- FoldGenData<"MOV32rr">;
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOV64rr">;
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
}
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 102cf5aa309c..9345bdaf530f 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -192,7 +192,7 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
//===----------------------------------------------------------------------===//
multiclass sse12_move_rr<SDNode OpNode, ValueType vt, string base_opc,
- string asm_opr, Domain d, string Name> {
+ string asm_opr, Domain d> {
let isCommutable = 1 in
def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
@@ -205,17 +205,16 @@ multiclass sse12_move_rr<SDNode OpNode, ValueType vt, string base_opc,
def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(base_opc, asm_opr), []>,
- Sched<[SchedWriteFShuffle.XMM]>, FoldGenData<Name#rr>;
+ Sched<[SchedWriteFShuffle.XMM]>;
}
multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
X86MemOperand x86memop, string OpcodeStr,
- Domain d, string Name, Predicate pred> {
+ Domain d, Predicate pred> {
// AVX
let Predicates = [UseAVX, OptForSize] in
defm V#NAME : sse12_move_rr<OpNode, vt, OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d,
- "V"#Name>,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
VEX_4V, VEX_LIG, VEX_WIG;
def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
@@ -226,7 +225,7 @@ multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
let Constraints = "$src1 = $dst" in {
let Predicates = [pred, NoSSE41_Or_OptForSize] in
defm NAME : sse12_move_rr<OpNode, vt, OpcodeStr,
- "\t{$src2, $dst|$dst, $src2}", d, Name>;
+ "\t{$src2, $dst|$dst, $src2}", d>;
}
def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
@@ -269,9 +268,9 @@ multiclass sse12_move_rm<RegisterClass RC, ValueType vt, X86MemOperand x86memop,
}
defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
- SSEPackedSingle, "MOVSS", UseSSE1>, XS;
+ SSEPackedSingle, UseSSE1>, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
- SSEPackedDouble, "MOVSD", UseSSE2>, XD;
+ SSEPackedDouble, UseSSE2>, XD;
let canFoldAsLoad = 1, isReMaterializable = 1 in {
defm MOVSS : sse12_move_rm<FR32, v4f32, f32mem, loadf32, X86vzload32, "movss",
@@ -442,38 +441,38 @@ let SchedRW = [SchedWriteFMoveLS.XMM.RR] in {
def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movaps\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG, FoldGenData<"VMOVAPSrr">;
+ VEX, VEX_WIG;
def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movapd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG, FoldGenData<"VMOVAPDrr">;
+ VEX, VEX_WIG;
def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movups\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG, FoldGenData<"VMOVUPSrr">;
+ VEX, VEX_WIG;
def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movupd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG, FoldGenData<"VMOVUPDrr">;
+ VEX, VEX_WIG;
} // SchedRW
let SchedRW = [SchedWriteFMoveLS.YMM.RR] in {
def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movaps\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPSYrr">;
+ VEX, VEX_L, VEX_WIG;
def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movapd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPDYrr">;
+ VEX, VEX_L, VEX_WIG;
def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movups\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPSYrr">;
+ VEX, VEX_L, VEX_WIG;
def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movupd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPDYrr">;
+ VEX, VEX_L, VEX_WIG;
} // SchedRW
} // Predicate
@@ -514,17 +513,13 @@ def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
isMoveReg = 1, SchedRW = [SchedWriteFMoveLS.XMM.RR] in {
def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOVAPSrr">;
+ "movaps\t{$src, $dst|$dst, $src}", []>;
def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOVAPDrr">;
+ "movapd\t{$src, $dst|$dst, $src}", []>;
def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movups\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOVUPSrr">;
+ "movups\t{$src, $dst|$dst, $src}", []>;
def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOVUPDrr">;
+ "movupd\t{$src, $dst|$dst, $src}", []>;
}
// Reversed version with ".s" suffix for GAS compatibility.
@@ -3303,19 +3298,19 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.XMM.RR]>,
- VEX, VEX_WIG, FoldGenData<"VMOVDQArr">;
+ VEX, VEX_WIG;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.YMM.RR]>,
- VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQAYrr">;
+ VEX, VEX_L, VEX_WIG;
def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.XMM.RR]>,
- VEX, VEX_WIG, FoldGenData<"VMOVDQUrr">;
+ VEX, VEX_WIG;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.YMM.RR]>,
- VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQUYrr">;
+ VEX, VEX_L, VEX_WIG;
}
let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
@@ -3371,12 +3366,11 @@ def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
// For Disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", []>,
- FoldGenData<"MOVDQArr">;
+ "movdqa\t{$src, $dst|$dst, $src}", []>;
def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
- XS, Requires<[UseSSE2]>, FoldGenData<"MOVDQUrr">;
+ XS, Requires<[UseSSE2]>;
}
} // SchedRW
@@ -5256,7 +5250,7 @@ multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
(ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
- Sched<[WriteVecExtract]>, FoldGenData<NAME#rr>;
+ Sched<[WriteVecExtract]>;
let hasSideEffects = 0, mayStore = 1 in
def mr : SS4AIi8<opc, MRMDestMem, (outs),
diff --git a/llvm/lib/Target/X86/X86InstrXOP.td b/llvm/lib/Target/X86/X86InstrXOP.td
index 1f7ade928c72..a62bb2e855c9 100644
--- a/llvm/lib/Target/X86/X86InstrXOP.td
+++ b/llvm/lib/Target/X86/X86InstrXOP.td
@@ -119,7 +119,7 @@ multiclass xop3op<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>,
- XOP_4V, REX_W, Sched<[sched]>, FoldGenData<NAME#rr>;
+ XOP_4V, REX_W, Sched<[sched]>;
}
let ExeDomain = SSEPackedInt in {
@@ -316,7 +316,7 @@ multiclass xop4op<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, XOP_4V, REX_W, Sched<[sched]>, FoldGenData<NAME#rrr>;
+ []>, XOP_4V, REX_W, Sched<[sched]>;
}
let ExeDomain = SSEPackedInt in {
@@ -361,7 +361,7 @@ multiclass xop4op_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
(ins RC:$src1, RC:$src2, RC:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
- []>, XOP_4V, REX_W, Sched<[sched]>, FoldGenData<NAME#rrr>;
+ []>, XOP_4V, REX_W, Sched<[sched]>;
}
let ExeDomain = SSEPackedInt in {
@@ -450,7 +450,7 @@ multiclass xop_vpermil2<bits<8> Opc, string OpcodeStr, RegisterClass RC,
(ins RC:$src1, RC:$src2, RC:$src3, u4imm:$src4),
!strconcat(OpcodeStr,
"\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"),
- []>, REX_W, Sched<[sched]>, FoldGenData<NAME#rr>;
+ []>, REX_W, Sched<[sched]>;
}
let ExeDomain = SSEPackedDouble in {
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index c15192593bc7..db5a31f7f3a6 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -220,19 +220,6 @@ static inline bool isNOREXRegClass(const Record *Op) {
return Op->getName().contains("_NOREX");
}
-// Get the alternative instruction pointed by "FoldGenRegForm" field.
-static inline const CodeGenInstruction *
-getAltRegInst(const CodeGenInstruction *I, const RecordKeeper &Records,
- const CodeGenTarget &Target) {
-
- StringRef AltRegInstStr = I->TheDef->getValueAsString("FoldGenRegForm");
- Record *AltRegInstRec = Records.getDef(AltRegInstStr);
- assert(AltRegInstRec &&
- "Alternative register form instruction def not found");
- CodeGenInstruction &AltRegInst = Target.getInstruction(AltRegInstRec);
- return &AltRegInst;
-}
-
// Function object - Operator() returns true if the given VEX instruction
// matches the EVEX instruction of this object.
class IsMatch {
@@ -583,16 +570,13 @@ void X86FoldTablesEmitter::run(raw_ostream &o) {
auto Match = find_if(OpcRegInsts, IsMatch(MemInst, Variant));
if (Match != OpcRegInsts.end()) {
const CodeGenInstruction *RegInst = *Match;
- // If the matched instruction has it's "FoldGenRegForm" set, map the
- // memory form instruction to the register form instruction pointed by
- // this field
- if (RegInst->TheDef->isValueUnset("FoldGenRegForm")) {
- updateTables(RegInst, MemInst);
- } else {
- const CodeGenInstruction *AltRegInst =
- getAltRegInst(RegInst, Records, Target);
- updateTables(AltRegInst, MemInst);
+ StringRef RegInstName = RegInst->TheDef->getName();
+ if (RegInstName.endswith("_REV") || RegInstName.endswith("_alt")) {
+ if (auto *RegAltRec = Records.getDef(RegInstName.drop_back(4))) {
+ RegInst = &Target.getInstruction(RegAltRec);
+ }
}
+ updateTables(RegInst, MemInst);
OpcRegInsts.erase(Match);
}
}
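
Summary of the mechanism (editorial note, not part of the commit): the emitter change above replaces the per-instruction FoldGenData<> annotation with a naming convention. When the matched register-form instruction is a disassembly-only "_REV"/"_alt" variant, the emitter now looks up the base instruction by dropping that four-character suffix, falling back to the variant itself if no such def exists. The following is a minimal, hypothetical C++ sketch of that inference only; the names (endsWithSuffix, baseRegForm) are illustrative, and the real backend operates on TableGen Record/CodeGenInstruction objects via Records.getDef() and Target.getInstruction() as shown in the diff.

    // Hypothetical stand-alone sketch of the suffix-based inference; requires C++17.
    #include <optional>
    #include <string>

    static bool endsWithSuffix(const std::string &S, const std::string &Suffix) {
      return S.size() >= Suffix.size() &&
             S.compare(S.size() - Suffix.size(), Suffix.size(), Suffix) == 0;
    }

    // Return the name of the base register-form instruction, or std::nullopt if
    // the name is not a "_REV"/"_alt" variant (i.e. it is already the base form).
    std::optional<std::string> baseRegForm(const std::string &InstName) {
      if (endsWithSuffix(InstName, "_REV") || endsWithSuffix(InstName, "_alt"))
        return InstName.substr(0, InstName.size() - 4); // e.g. "VMOVAPSrr_REV" -> "VMOVAPSrr"
      return std::nullopt;
    }

Because every _REV/_alt def removed above already follows this naming scheme (e.g. MOV8rr_REV pointed at MOV8rr, MOV8ri_alt at MOV8ri), the explicit FoldGenData<"..."> hints are redundant, which is why the change is NFCI for the generated fold tables.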