[llvm] ff32ab3 - [X86][NFC] Not imply TB in PS|PD|XS|XD
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 21 23:44:47 PST 2023
Author: Shengchen Kan
Date: 2023-12-22T15:44:30+08:00
New Revision: ff32ab3ae7f4ae32907bb802b67a962743db7ba0
URL: https://github.com/llvm/llvm-project/commit/ff32ab3ae7f4ae32907bb802b67a962743db7ba0
DIFF: https://github.com/llvm/llvm-project/commit/ff32ab3ae7f4ae32907bb802b67a962743db7ba0.diff
LOG: [X86][NFC] Not imply TB in PS|PD|XS|XD
This can help us avoid introducing new classes T_MAP*PS|PD|XS|XD
when a new opcode map is supported.
And, T_MAP*PS|PD|XS|XD does not look better than T_MAP*, PS|PD|XS|XD.
Added:
Modified:
llvm/lib/Target/X86/X86InstrAMX.td
llvm/lib/Target/X86/X86InstrAVX512.td
llvm/lib/Target/X86/X86InstrArithmetic.td
llvm/lib/Target/X86/X86InstrFPStack.td
llvm/lib/Target/X86/X86InstrKL.td
llvm/lib/Target/X86/X86InstrMMX.td
llvm/lib/Target/X86/X86InstrMisc.td
llvm/lib/Target/X86/X86InstrRAOINT.td
llvm/lib/Target/X86/X86InstrSGX.td
llvm/lib/Target/X86/X86InstrSNP.td
llvm/lib/Target/X86/X86InstrSSE.td
llvm/lib/Target/X86/X86InstrShiftRotate.td
llvm/lib/Target/X86/X86InstrSystem.td
llvm/lib/Target/X86/X86InstrTDX.td
llvm/lib/Target/X86/X86InstrTSX.td
llvm/lib/Target/X86/X86InstrUtils.td
llvm/lib/Target/X86/X86InstrVMX.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrAMX.td b/llvm/lib/Target/X86/X86InstrAMX.td
index 71e6a44c9d8e72..a4292b99511bbd 100644
--- a/llvm/lib/Target/X86/X86InstrAMX.td
+++ b/llvm/lib/Target/X86/X86InstrAMX.td
@@ -20,32 +20,32 @@ let Predicates = [HasAMXTILE, In64BitMode] in {
Defs = [TMM0,TMM1,TMM2,TMM3,TMM4,TMM5,TMM6,TMM7] in
def LDTILECFG : I <0x49, MRM0m, (outs), (ins opaquemem:$src),
"ldtilecfg\t$src",
- [(int_x86_ldtilecfg addr:$src)]>, VEX, T8PS;
+ [(int_x86_ldtilecfg addr:$src)]>, VEX, T8, PS;
let hasSideEffects = 1 in
def STTILECFG : I <0x49, MRM0m, (outs), (ins opaquemem:$src),
"sttilecfg\t$src",
- [(int_x86_sttilecfg addr:$src)]>, VEX, T8PD;
+ [(int_x86_sttilecfg addr:$src)]>, VEX, T8, PD;
let mayLoad = 1 in
def TILELOADD : I<0x4b, MRMSrcMemFSIB, (outs TILE:$dst),
(ins sibmem:$src),
"tileloadd\t{$src, $dst|$dst, $src}", []>,
- VEX, T8XD;
+ VEX, T8, XD;
let mayLoad = 1 in
def TILELOADDT1 : I<0x4b, MRMSrcMemFSIB, (outs TILE:$dst),
(ins sibmem:$src),
"tileloaddt1\t{$src, $dst|$dst, $src}", []>,
- VEX, T8PD;
+ VEX, T8, PD;
let Defs = [TMM0,TMM1,TMM2,TMM3,TMM4,TMM5,TMM6,TMM7] in
def TILERELEASE : I<0x49, MRM_C0, (outs), (ins),
- "tilerelease", [(int_x86_tilerelease)]>, VEX, T8PS;
+ "tilerelease", [(int_x86_tilerelease)]>, VEX, T8, PS;
let mayStore = 1 in
def TILESTORED : I<0x4b, MRMDestMemFSIB, (outs),
(ins sibmem:$dst, TILE:$src),
"tilestored\t{$src, $dst|$dst, $src}", []>,
- VEX, T8XS;
+ VEX, T8, XS;
def TILEZERO : I<0x49, MRMr0, (outs TILE:$dst), (ins),
"tilezero\t$dst", []>,
- VEX, T8XD;
+ VEX, T8, XD;
// Pseduo instruction for RA.
let isPseudo = true, mayLoad = 1, hasSideEffects = 1,
@@ -91,19 +91,19 @@ let Predicates = [HasAMXINT8, In64BitMode] in {
def TDPBSSD : I<0x5e, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tdpbssd\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
- VEX, VVVV, T8XD;
+ VEX, VVVV, T8, XD;
def TDPBSUD : I<0x5e, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tdpbsud\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
- VEX, VVVV, T8XS;
+ VEX, VVVV, T8, XS;
def TDPBUSD : I<0x5e, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tdpbusd\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
- VEX, VVVV, T8PD;
+ VEX, VVVV, T8, PD;
def TDPBUUD : I<0x5e, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tdpbuud\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
- VEX, VVVV, T8PS;
+ VEX, VVVV, T8, PS;
}
// Pseduo instruction for RA.
@@ -163,7 +163,7 @@ let Predicates = [HasAMXBF16, In64BitMode] in {
def TDPBF16PS : I<0x5c, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tdpbf16ps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>, VEX, VVVV, T8XS;
+ []>, VEX, VVVV, T8, XS;
// Pseduo instruction for RA.
let isPseudo = true, Constraints = "$src4 = $dst" in
@@ -193,7 +193,7 @@ let Predicates = [HasAMXFP16, In64BitMode] in {
def TDPFP16PS : I<0x5c, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tdpfp16ps\t{$src3, $src2, $src1|$src1, $src2, $src3}",
- []>, VEX, VVVV, T8XD;
+ []>, VEX, VVVV, T8, XD;
}
// Pseduo instruction for RA.
@@ -222,11 +222,11 @@ let Predicates = [HasAMXCOMPLEX, In64BitMode] in {
def TCMMIMFP16PS : I<0x6c, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tcmmimfp16ps\t{$src3, $src2, $src1|$src1, $src2, $src3}",
- []>, T8PD, VEX, VVVV;
+ []>, T8, PD, VEX, VVVV;
def TCMMRLFP16PS : I<0x6c, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tcmmrlfp16ps\t{$src3, $src2, $src1|$src1, $src2, $src3}",
- []>, VEX, VVVV, WIG, T8PS;
+ []>, VEX, VVVV, WIG, T8, PS;
} // Constraints = "$src1 = $dst"
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 86619dfd07bca8..e3a4aee3aceb77 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -1039,7 +1039,7 @@ multiclass avx512_broadcast_rm_split<bits<8> opc, string OpcodeStr,
(bitconvert
(DestInfo.VT
(UnmaskedOp (SrcInfo.VT SrcInfo.RC:$src))))))],
- DestInfo.ExeDomain>, T8PD, EVEX, Sched<[SchedRR]>;
+ DestInfo.ExeDomain>, T8, PD, EVEX, Sched<[SchedRR]>;
def rrkz : AVX512PI<opc, MRMSrcReg, (outs MaskInfo.RC:$dst),
(ins MaskInfo.KRCWM:$mask, SrcInfo.RC:$src),
!strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
@@ -1051,7 +1051,7 @@ multiclass avx512_broadcast_rm_split<bits<8> opc, string OpcodeStr,
(DestInfo.VT
(X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src))))),
MaskInfo.ImmAllZerosV))],
- DestInfo.ExeDomain>, T8PD, EVEX, EVEX_KZ, Sched<[SchedRR]>;
+ DestInfo.ExeDomain>, T8, PD, EVEX, EVEX_KZ, Sched<[SchedRR]>;
let Constraints = "$src0 = $dst" in
def rrk : AVX512PI<opc, MRMSrcReg, (outs MaskInfo.RC:$dst),
(ins MaskInfo.RC:$src0, MaskInfo.KRCWM:$mask,
@@ -1065,7 +1065,7 @@ multiclass avx512_broadcast_rm_split<bits<8> opc, string OpcodeStr,
(DestInfo.VT
(X86VBroadcast (SrcInfo.VT SrcInfo.RC:$src))))),
MaskInfo.RC:$src0))],
- DestInfo.ExeDomain>, T8PD, EVEX, EVEX_K, Sched<[SchedRR]>;
+ DestInfo.ExeDomain>, T8, PD, EVEX, EVEX_K, Sched<[SchedRR]>;
let hasSideEffects = 0, mayLoad = 1 in
def rm : AVX512PI<opc, MRMSrcMem, (outs MaskInfo.RC:$dst),
@@ -1076,7 +1076,7 @@ multiclass avx512_broadcast_rm_split<bits<8> opc, string OpcodeStr,
(bitconvert
(DestInfo.VT
(UnmaskedBcastOp addr:$src)))))],
- DestInfo.ExeDomain>, T8PD, EVEX,
+ DestInfo.ExeDomain>, T8, PD, EVEX,
EVEX_CD8<SrcInfo.EltSize, CD8VT1>, Sched<[SchedRM]>;
def rmkz : AVX512PI<opc, MRMSrcMem, (outs MaskInfo.RC:$dst),
@@ -1090,7 +1090,7 @@ multiclass avx512_broadcast_rm_split<bits<8> opc, string OpcodeStr,
(DestInfo.VT
(SrcInfo.BroadcastLdFrag addr:$src)))),
MaskInfo.ImmAllZerosV))],
- DestInfo.ExeDomain>, T8PD, EVEX, EVEX_KZ,
+ DestInfo.ExeDomain>, T8, PD, EVEX, EVEX_KZ,
EVEX_CD8<SrcInfo.EltSize, CD8VT1>, Sched<[SchedRM]>;
let Constraints = "$src0 = $dst",
@@ -1107,7 +1107,7 @@ multiclass avx512_broadcast_rm_split<bits<8> opc, string OpcodeStr,
(DestInfo.VT
(SrcInfo.BroadcastLdFrag addr:$src)))),
MaskInfo.RC:$src0))],
- DestInfo.ExeDomain>, T8PD, EVEX, EVEX_K,
+ DestInfo.ExeDomain>, T8, PD, EVEX, EVEX_K,
EVEX_CD8<SrcInfo.EltSize, CD8VT1>, Sched<[SchedRM]>;
}
@@ -1173,7 +1173,7 @@ multiclass avx512_int_broadcast_reg<bits<8> opc, SchedWrite SchedRR,
"vpbroadcast"#_.Suffix, "$src", "$src",
(_.VT (OpNode SrcRC:$src)), /*IsCommutable*/0,
/*IsKCommutable*/0, /*IsKZCommutable*/0, vselect>,
- T8PD, EVEX, Sched<[SchedRR]>;
+ T8, PD, EVEX, Sched<[SchedRR]>;
}
multiclass avx512_int_broadcastbw_reg<bits<8> opc, string Name, SchedWrite SchedRR,
@@ -1185,7 +1185,7 @@ multiclass avx512_int_broadcastbw_reg<bits<8> opc, string Name, SchedWrite Sched
!con((ins _.RC:$src0, _.KRCWM:$mask), (ins GR32:$src)),
!con((ins _.KRCWM:$mask), (ins GR32:$src)),
"vpbroadcast"#_.Suffix, "$src", "$src", [], [], [],
- "$src0 = $dst">, T8PD, EVEX, Sched<[SchedRR]>;
+ "$src0 = $dst">, T8, PD, EVEX, Sched<[SchedRR]>;
def : Pat <(_.VT (OpNode SrcRC:$src)),
(!cast<Instruction>(Name#rr)
@@ -2082,7 +2082,7 @@ defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd",
defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq",
SchedWriteVecALU, avx512vl_i64_info, HasAVX512, 1>,
- T8PD, REX_W, EVEX_CD8<64, CD8VF>;
+ T8, REX_W, EVEX_CD8<64, CD8VF>;
defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb",
SchedWriteVecALU, avx512vl_i8_info, HasBWI>,
@@ -2098,7 +2098,7 @@ defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd",
defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq",
SchedWriteVecALU, avx512vl_i64_info, HasAVX512>,
- T8PD, REX_W, EVEX_CD8<64, CD8VF>;
+ T8, REX_W, EVEX_CD8<64, CD8VF>;
}
multiclass avx512_icmp_cc<bits<8> opc, string Suffix, PatFrag Frag,
@@ -2625,40 +2625,40 @@ multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
let Predicates = [HasDQI, NoEGPR] in
defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8mem>,
avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
- VEX, PD;
+ VEX, TB, PD;
let Predicates = [HasDQI, HasEGPR, In64BitMode] in
defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8mem, "_EVEX">,
avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32, "_EVEX">,
- EVEX, PD;
+ EVEX, TB, PD;
let Predicates = [HasAVX512, NoEGPR] in
defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
- VEX, PS;
+ VEX, TB, PS;
let Predicates = [HasAVX512, HasEGPR, In64BitMode] in
defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem, "_EVEX">,
avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32, "_EVEX">,
- EVEX, PS;
+ EVEX, TB, PS;
let Predicates = [HasBWI, NoEGPR] in {
defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1,i32mem>,
- VEX, PD, REX_W;
+ VEX, TB, PD, REX_W;
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
- VEX, XD;
+ VEX, TB, XD;
defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
- VEX, PS, REX_W;
+ VEX, TB, PS, REX_W;
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
- VEX, XD, REX_W;
+ VEX, TB, XD, REX_W;
}
let Predicates = [HasBWI, HasEGPR, In64BitMode] in {
defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1,i32mem, "_EVEX">,
- EVEX, PD, REX_W;
+ EVEX, TB, PD, REX_W;
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32, "_EVEX">,
- EVEX, XD;
+ EVEX, TB, XD;
defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem, "_EVEX">,
- EVEX, PS, REX_W;
+ EVEX, TB, PS, REX_W;
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64, "_EVEX">,
- EVEX, XD, REX_W;
+ EVEX, TB, XD, REX_W;
}
// GR from/to mask register
@@ -2769,13 +2769,13 @@ multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
SDPatternOperator OpNode,
X86FoldableSchedWrite sched> {
defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
- sched, HasDQI>, VEX, PD;
+ sched, HasDQI>, VEX, TB, PD;
defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
- sched, HasAVX512>, VEX, PS;
+ sched, HasAVX512>, VEX, TB, PS;
defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
- sched, HasBWI>, VEX, PD, REX_W;
+ sched, HasBWI>, VEX, TB, PD, REX_W;
defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
- sched, HasBWI>, VEX, PS, REX_W;
+ sched, HasBWI>, VEX, TB, PS, REX_W;
}
// TODO - do we need a X86SchedWriteWidths::KMASK type?
@@ -2812,13 +2812,13 @@ multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
X86FoldableSchedWrite sched, bit IsCommutable,
Predicate prdW = HasAVX512> {
defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
- sched, HasDQI, IsCommutable>, VEX, VVVV, VEX_L, PD;
+ sched, HasDQI, IsCommutable>, VEX, VVVV, VEX_L, TB, PD;
defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
- sched, prdW, IsCommutable>, VEX, VVVV, VEX_L, PS;
+ sched, prdW, IsCommutable>, VEX, VVVV, VEX_L, TB, PS;
defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
- sched, HasBWI, IsCommutable>, VEX, VVVV, VEX_L, REX_W, PD;
+ sched, HasBWI, IsCommutable>, VEX, VVVV, VEX_L, REX_W, TB, PD;
defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
- sched, HasBWI, IsCommutable>, VEX, VVVV, VEX_L, REX_W, PS;
+ sched, HasBWI, IsCommutable>, VEX, VVVV, VEX_L, REX_W, TB, PS;
}
// TODO - do we need a X86SchedWriteWidths::KMASK type?
@@ -2876,9 +2876,9 @@ multiclass avx512_mask_unpck<string Suffix, X86KVectorVTInfo Dst,
}
}
-defm KUNPCKBW : avx512_mask_unpck<"bw", v16i1_info, v8i1_info, WriteShuffle, HasAVX512>, PD;
-defm KUNPCKWD : avx512_mask_unpck<"wd", v32i1_info, v16i1_info, WriteShuffle, HasBWI>, PS;
-defm KUNPCKDQ : avx512_mask_unpck<"dq", v64i1_info, v32i1_info, WriteShuffle, HasBWI>, PS, REX_W;
+defm KUNPCKBW : avx512_mask_unpck<"bw", v16i1_info, v8i1_info, WriteShuffle, HasAVX512>, TB, PD;
+defm KUNPCKWD : avx512_mask_unpck<"wd", v32i1_info, v16i1_info, WriteShuffle, HasBWI>, TB, PS;
+defm KUNPCKDQ : avx512_mask_unpck<"dq", v64i1_info, v32i1_info, WriteShuffle, HasBWI>, TB, PS, REX_W;
// Mask bit testing
multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
@@ -2895,13 +2895,13 @@ multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched,
Predicate prdW = HasAVX512> {
defm B : avx512_mask_testop<opc, OpcodeStr#"b", VK8, OpNode, sched, HasDQI>,
- VEX, PD;
+ VEX, TB, PD;
defm W : avx512_mask_testop<opc, OpcodeStr#"w", VK16, OpNode, sched, prdW>,
- VEX, PS;
+ VEX, TB, PS;
defm Q : avx512_mask_testop<opc, OpcodeStr#"q", VK64, OpNode, sched, HasBWI>,
- VEX, PS, REX_W;
+ VEX, TB, PS, REX_W;
defm D : avx512_mask_testop<opc, OpcodeStr#"d", VK32, OpNode, sched, HasBWI>,
- VEX, PD, REX_W;
+ VEX, TB, PD, REX_W;
}
// TODO - do we need a X86SchedWriteWidths::KMASK type?
@@ -2922,15 +2922,15 @@ multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
SDNode OpNode, X86FoldableSchedWrite sched> {
defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode,
- sched>, VEX, TAPD, REX_W;
+ sched>, VEX, TA, PD, REX_W;
let Predicates = [HasDQI] in
defm B : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "b"), VK8, OpNode,
- sched>, VEX, TAPD;
+ sched>, VEX, TA, PD;
let Predicates = [HasBWI] in {
defm Q : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "q"), VK64, OpNode,
- sched>, VEX, TAPD, REX_W;
+ sched>, VEX, TA, PD, REX_W;
defm D : avx512_mask_shiftop<opc2, !strconcat(OpcodeStr, "d"), VK32, OpNode,
- sched>, VEX, TAPD;
+ sched>, VEX, TA, PD;
}
}
@@ -3371,25 +3371,25 @@ defm VMOVAPS : avx512_alignedload_vl<0x28, "vmovaps", avx512vl_f32_info,
HasAVX512, SchedWriteFMoveLS, "VMOVAPS">,
avx512_alignedstore_vl<0x29, "vmovaps", avx512vl_f32_info,
HasAVX512, SchedWriteFMoveLS, "VMOVAPS">,
- PS, EVEX_CD8<32, CD8VF>;
+ TB, PS, EVEX_CD8<32, CD8VF>;
defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
HasAVX512, SchedWriteFMoveLS, "VMOVAPD">,
avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info,
HasAVX512, SchedWriteFMoveLS, "VMOVAPD">,
- PD, REX_W, EVEX_CD8<64, CD8VF>;
+ TB, PD, REX_W, EVEX_CD8<64, CD8VF>;
defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512,
SchedWriteFMoveLS, "VMOVUPS", 0, null_frag>,
avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512,
SchedWriteFMoveLS, "VMOVUPS">,
- PS, EVEX_CD8<32, CD8VF>;
+ TB, PS, EVEX_CD8<32, CD8VF>;
defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512,
SchedWriteFMoveLS, "VMOVUPD", 0, null_frag>,
avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512,
SchedWriteFMoveLS, "VMOVUPD">,
- PD, REX_W, EVEX_CD8<64, CD8VF>;
+ TB, PD, REX_W, EVEX_CD8<64, CD8VF>;
defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
HasAVX512, SchedWriteVecMoveLS,
@@ -3397,7 +3397,7 @@ defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
HasAVX512, SchedWriteVecMoveLS,
"VMOVDQA", 1>,
- PD, EVEX_CD8<32, CD8VF>;
+ TB, PD, EVEX_CD8<32, CD8VF>;
defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info,
HasAVX512, SchedWriteVecMoveLS,
@@ -3405,31 +3405,31 @@ defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info,
avx512_alignedstore_vl<0x7F, "vmovdqa64", avx512vl_i64_info,
HasAVX512, SchedWriteVecMoveLS,
"VMOVDQA">,
- PD, REX_W, EVEX_CD8<64, CD8VF>;
+ TB, PD, REX_W, EVEX_CD8<64, CD8VF>;
defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", avx512vl_i8_info, HasBWI,
SchedWriteVecMoveLS, "VMOVDQU", 1>,
avx512_store_vl<0x7F, "vmovdqu8", avx512vl_i8_info, HasBWI,
SchedWriteVecMoveLS, "VMOVDQU", 1>,
- XD, EVEX_CD8<8, CD8VF>;
+ TB, XD, EVEX_CD8<8, CD8VF>;
defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI,
SchedWriteVecMoveLS, "VMOVDQU", 1>,
avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info, HasBWI,
SchedWriteVecMoveLS, "VMOVDQU", 1>,
- XD, REX_W, EVEX_CD8<16, CD8VF>;
+ TB, XD, REX_W, EVEX_CD8<16, CD8VF>;
defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
SchedWriteVecMoveLS, "VMOVDQU", 1, null_frag>,
avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
SchedWriteVecMoveLS, "VMOVDQU", 1>,
- XS, EVEX_CD8<32, CD8VF>;
+ TB, XS, EVEX_CD8<32, CD8VF>;
defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
SchedWriteVecMoveLS, "VMOVDQU", 0, null_frag>,
avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
SchedWriteVecMoveLS, "VMOVDQU">,
- XS, REX_W, EVEX_CD8<64, CD8VF>;
+ TB, XS, REX_W, EVEX_CD8<64, CD8VF>;
// Special instructions to help with spilling when we don't have VLX. We need
// to load or store from a ZMM register instead. These are converted in
@@ -3816,12 +3816,12 @@ def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
(iPTR 0)))]>,
- PD, EVEX, REX_W, Sched<[WriteVecMoveToGpr]>,
+ TB, PD, EVEX, REX_W, Sched<[WriteVecMoveToGpr]>,
Requires<[HasAVX512]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def VMOVPQIto64Zmr : I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128X:$src),
- "vmovq\t{$src, $dst|$dst, $src}", []>, PD,
+ "vmovq\t{$src, $dst|$dst, $src}", []>, TB, PD,
EVEX, REX_W, EVEX_CD8<64, CD8VT1>, Sched<[WriteVecStore]>,
Requires<[HasAVX512, In64BitMode]>;
@@ -3830,7 +3830,7 @@ def VMOVPQI2QIZmr : I<0xD6, MRMDestMem, (outs),
"vmovq\t{$src, $dst|$dst, $src}",
[(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
addr:$dst)]>,
- EVEX, PD, REX_W, EVEX_CD8<64, CD8VT1>,
+ EVEX, TB, PD, REX_W, EVEX_CD8<64, CD8VT1>,
Sched<[WriteVecStore]>, Requires<[HasAVX512]>;
let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in
@@ -3954,14 +3954,14 @@ multiclass avx512_move_scalar<string asm, SDNode OpNode, PatFrag vzload_frag,
}
defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, X86vzload32, f32x_info>,
- VEX_LIG, XS, EVEX_CD8<32, CD8VT1>;
+ VEX_LIG, TB, XS, EVEX_CD8<32, CD8VT1>;
defm VMOVSDZ : avx512_move_scalar<"vmovsd", X86Movsd, X86vzload64, f64x_info>,
- VEX_LIG, XD, REX_W, EVEX_CD8<64, CD8VT1>;
+ VEX_LIG, TB, XD, REX_W, EVEX_CD8<64, CD8VT1>;
defm VMOVSHZ : avx512_move_scalar<"vmovsh", X86Movsh, X86vzload16, f16x_info,
HasFP16>,
- VEX_LIG, T_MAP5XS, EVEX_CD8<16, CD8VT1>;
+ VEX_LIG, T_MAP5, XS, EVEX_CD8<16, CD8VT1>;
multiclass avx512_move_scalar_lowering<string InstrStr, SDNode OpNode,
PatLeaf ZeroFP, X86VectorVTInfo _> {
@@ -4286,7 +4286,7 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
def VMOVSHZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovsh\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, T_MAP5XS, EVEX, VVVV, VEX_LIG,
+ []>, T_MAP5, XS, EVEX, VVVV, VEX_LIG,
Sched<[SchedWriteFShuffle.XMM]>;
let Constraints = "$src0 = $dst" in
@@ -4295,20 +4295,20 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
VR128X:$src1, VR128X:$src2),
"vmovsh\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
- []>, T_MAP5XS, EVEX_K, EVEX, VVVV, VEX_LIG,
+ []>, T_MAP5, XS, EVEX_K, EVEX, VVVV, VEX_LIG,
Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSHZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins f16x_info.KRCWM:$mask, VR128X:$src1, VR128X:$src2),
"vmovsh\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
- []>, EVEX_KZ, T_MAP5XS, EVEX, VVVV, VEX_LIG,
+ []>, EVEX_KZ, T_MAP5, XS, EVEX, VVVV, VEX_LIG,
Sched<[SchedWriteFShuffle.XMM]>;
}
def VMOVSSZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XS, EVEX, VVVV, VEX_LIG,
+ []>, TB, XS, EVEX, VVVV, VEX_LIG,
Sched<[SchedWriteFShuffle.XMM]>;
let Constraints = "$src0 = $dst" in
@@ -4317,20 +4317,20 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
VR128X:$src1, VR128X:$src2),
"vmovss\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
- []>, EVEX_K, XS, EVEX, VVVV, VEX_LIG,
+ []>, EVEX_K, TB, XS, EVEX, VVVV, VEX_LIG,
Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSSZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins f32x_info.KRCWM:$mask, VR128X:$src1, VR128X:$src2),
"vmovss\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
- []>, EVEX_KZ, XS, EVEX, VVVV, VEX_LIG,
+ []>, EVEX_KZ, TB, XS, EVEX, VVVV, VEX_LIG,
Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSDZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovsd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XD, EVEX, VVVV, VEX_LIG, REX_W,
+ []>, TB, XD, EVEX, VVVV, VEX_LIG, REX_W,
Sched<[SchedWriteFShuffle.XMM]>;
let Constraints = "$src0 = $dst" in
@@ -4339,7 +4339,7 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
VR128X:$src1, VR128X:$src2),
"vmovsd\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
- []>, EVEX_K, XD, EVEX, VVVV, VEX_LIG,
+ []>, EVEX_K, TB, XD, EVEX, VVVV, VEX_LIG,
REX_W, Sched<[SchedWriteFShuffle.XMM]>;
def VMOVSDZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
@@ -4347,7 +4347,7 @@ let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
VR128X:$src2),
"vmovsd\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
- []>, EVEX_KZ, XD, EVEX, VVVV, VEX_LIG,
+ []>, EVEX_KZ, TB, XD, EVEX, VVVV, VEX_LIG,
REX_W, Sched<[SchedWriteFShuffle.XMM]>;
}
@@ -4546,20 +4546,20 @@ let Predicates = [HasAVX512] in {
def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
(ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
[], SSEPackedInt>, Sched<[SchedWriteVecMoveLS.ZMM.RM]>,
- EVEX, T8PD, EVEX_V512, EVEX_CD8<64, CD8VF>;
+ EVEX, T8, PD, EVEX_V512, EVEX_CD8<64, CD8VF>;
let Predicates = [HasVLX] in {
def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
(ins i256mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[], SSEPackedInt>, Sched<[SchedWriteVecMoveLS.YMM.RM]>,
- EVEX, T8PD, EVEX_V256, EVEX_CD8<64, CD8VF>;
+ EVEX, T8, PD, EVEX_V256, EVEX_CD8<64, CD8VF>;
def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
(ins i128mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}",
[], SSEPackedInt>, Sched<[SchedWriteVecMoveLS.XMM.RM]>,
- EVEX, T8PD, EVEX_V128, EVEX_CD8<64, CD8VF>;
+ EVEX, T8, PD, EVEX_V128, EVEX_CD8<64, CD8VF>;
}
multiclass avx512_movnt<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
@@ -4585,11 +4585,11 @@ multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr,
}
defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", avx512vl_i64_info,
- SchedWriteVecMoveLSNT>, PD;
+ SchedWriteVecMoveLSNT>, TB, PD;
defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", avx512vl_f64_info,
- SchedWriteFMoveLSNT>, PD, REX_W;
+ SchedWriteFMoveLSNT>, TB, PD, REX_W;
defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", avx512vl_f32_info,
- SchedWriteFMoveLSNT>, PS;
+ SchedWriteFMoveLSNT>, TB, PS;
let Predicates = [HasAVX512], AddedComplexity = 400 in {
def : Pat<(alignednontemporalstore (v16i32 VR512:$src), addr:$dst),
@@ -4829,22 +4829,22 @@ defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", uaddsat,
defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", usubsat,
SchedWriteVecALU, HasBWI, 0>;
defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmulld", mul,
- SchedWritePMULLD, HasAVX512, 1>, T8PD;
+ SchedWritePMULLD, HasAVX512, 1>, T8;
defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmullw", mul,
SchedWriteVecIMul, HasBWI, 1>;
defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmullq", mul,
- SchedWriteVecIMul, HasDQI, 1>, T8PD,
+ SchedWriteVecIMul, HasDQI, 1>, T8,
NotEVEX2VEXConvertible;
defm VPMULHW : avx512_binop_rm_vl_w<0xE5, "vpmulhw", mulhs, SchedWriteVecIMul,
HasBWI, 1>;
defm VPMULHUW : avx512_binop_rm_vl_w<0xE4, "vpmulhuw", mulhu, SchedWriteVecIMul,
HasBWI, 1>;
defm VPMULHRSW : avx512_binop_rm_vl_w<0x0B, "vpmulhrsw", X86mulhrs,
- SchedWriteVecIMul, HasBWI, 1>, T8PD;
+ SchedWriteVecIMul, HasBWI, 1>, T8;
defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", avgceilu,
SchedWriteVecALU, HasBWI, 1>;
defm VPMULDQ : avx512_binop_rm_vl_q<0x28, "vpmuldq", X86pmuldq,
- SchedWriteVecIMul, HasAVX512, 1>, T8PD;
+ SchedWriteVecIMul, HasAVX512, 1>, T8;
defm VPMULUDQ : avx512_binop_rm_vl_q<0xF4, "vpmuludq", X86pmuludq,
SchedWriteVecIMul, HasAVX512, 1>;
@@ -4872,7 +4872,7 @@ multiclass avx512_binop_all<bits<8> opc, string OpcodeStr,
defm VPMULTISHIFTQB : avx512_binop_all<0x83, "vpmultishiftqb", SchedWriteVecALU,
avx512vl_i8_info, avx512vl_i8_info,
- X86multishift, HasVBMI, 0>, T8PD;
+ X86multishift, HasVBMI, 0>, T8;
multiclass avx512_packs_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86VectorVTInfo _Src, X86VectorVTInfo _Dst,
@@ -4967,48 +4967,48 @@ defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512B
defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase;
defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
- avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD, WIG;
+ avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8, WIG;
defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
avx512vl_i16_info, avx512vl_i32_info, 1>, AVX512BIBase, WIG;
defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxsb", smax,
- SchedWriteVecALU, HasBWI, 1>, T8PD;
+ SchedWriteVecALU, HasBWI, 1>, T8;
defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxsw", smax,
SchedWriteVecALU, HasBWI, 1>;
defm VPMAXSD : avx512_binop_rm_vl_d<0x3D, "vpmaxsd", smax,
- SchedWriteVecALU, HasAVX512, 1>, T8PD;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMAXSQ : avx512_binop_rm_vl_q<0x3D, "vpmaxsq", smax,
- SchedWriteVecALU, HasAVX512, 1>, T8PD,
+ SchedWriteVecALU, HasAVX512, 1>, T8,
NotEVEX2VEXConvertible;
defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxub", umax,
SchedWriteVecALU, HasBWI, 1>;
defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxuw", umax,
- SchedWriteVecALU, HasBWI, 1>, T8PD;
+ SchedWriteVecALU, HasBWI, 1>, T8;
defm VPMAXUD : avx512_binop_rm_vl_d<0x3F, "vpmaxud", umax,
- SchedWriteVecALU, HasAVX512, 1>, T8PD;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMAXUQ : avx512_binop_rm_vl_q<0x3F, "vpmaxuq", umax,
- SchedWriteVecALU, HasAVX512, 1>, T8PD,
+ SchedWriteVecALU, HasAVX512, 1>, T8,
NotEVEX2VEXConvertible;
defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpminsb", smin,
- SchedWriteVecALU, HasBWI, 1>, T8PD;
+ SchedWriteVecALU, HasBWI, 1>, T8;
defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpminsw", smin,
SchedWriteVecALU, HasBWI, 1>;
defm VPMINSD : avx512_binop_rm_vl_d<0x39, "vpminsd", smin,
- SchedWriteVecALU, HasAVX512, 1>, T8PD;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMINSQ : avx512_binop_rm_vl_q<0x39, "vpminsq", smin,
- SchedWriteVecALU, HasAVX512, 1>, T8PD,
+ SchedWriteVecALU, HasAVX512, 1>, T8,
NotEVEX2VEXConvertible;
defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminub", umin,
SchedWriteVecALU, HasBWI, 1>;
defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminuw", umin,
- SchedWriteVecALU, HasBWI, 1>, T8PD;
+ SchedWriteVecALU, HasBWI, 1>, T8;
defm VPMINUD : avx512_binop_rm_vl_d<0x3B, "vpminud", umin,
- SchedWriteVecALU, HasAVX512, 1>, T8PD;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMINUQ : avx512_binop_rm_vl_q<0x3B, "vpminuq", umin,
- SchedWriteVecALU, HasAVX512, 1>, T8PD,
+ SchedWriteVecALU, HasAVX512, 1>, T8,
NotEVEX2VEXConvertible;
// PMULLQ: Use 512bit version to implement 128/256 bit in case NoVLX.
@@ -5445,18 +5445,18 @@ multiclass avx512_binop_s_round<bits<8> opc, string OpcodeStr, SDPatternOperator
sched.PS.Scl, IsCommutable>,
avx512_fp_scalar_round<opc, OpcodeStr#"ss", f32x_info, RndNode,
sched.PS.Scl>,
- XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+ TB, XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm SDZ : avx512_fp_scalar<opc, OpcodeStr#"sd", f64x_info, OpNode, VecNode,
sched.PD.Scl, IsCommutable>,
avx512_fp_scalar_round<opc, OpcodeStr#"sd", f64x_info, RndNode,
sched.PD.Scl>,
- XD, REX_W, EVEX, VVVV, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+ TB, XD, REX_W, EVEX, VVVV, VEX_LIG, EVEX_CD8<64, CD8VT1>;
let Predicates = [HasFP16] in
defm SHZ : avx512_fp_scalar<opc, OpcodeStr#"sh", f16x_info, OpNode,
VecNode, sched.PH.Scl, IsCommutable>,
avx512_fp_scalar_round<opc, OpcodeStr#"sh", f16x_info, RndNode,
sched.PH.Scl>,
- T_MAP5XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>;
}
multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -5465,16 +5465,16 @@ multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm SSZ : avx512_fp_scalar_sae<opc, OpcodeStr#"ss", f32x_info, OpNode,
VecNode, SaeNode, sched.PS.Scl, IsCommutable,
NAME#"SS">,
- XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+ TB, XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm SDZ : avx512_fp_scalar_sae<opc, OpcodeStr#"sd", f64x_info, OpNode,
VecNode, SaeNode, sched.PD.Scl, IsCommutable,
NAME#"SD">,
- XD, REX_W, EVEX, VVVV, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+ TB, XD, REX_W, EVEX, VVVV, VEX_LIG, EVEX_CD8<64, CD8VT1>;
let Predicates = [HasFP16] in {
defm SHZ : avx512_fp_scalar_sae<opc, OpcodeStr#"sh", f16x_info, OpNode,
VecNode, SaeNode, sched.PH.Scl, IsCommutable,
NAME#"SH">,
- T_MAP5XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>,
+ T_MAP5, XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>,
NotEVEX2VEXConvertible;
}
}
@@ -5515,29 +5515,29 @@ multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr,
}
}
defm VMINCSSZ : avx512_comutable_binop_s<0x5D, "vminss", f32x_info, X86fminc,
- SchedWriteFCmp.Scl, "VMINCSS">, XS,
+ SchedWriteFCmp.Scl, "VMINCSS">, TB, XS,
EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>, SIMD_EXC;
defm VMINCSDZ : avx512_comutable_binop_s<0x5D, "vminsd", f64x_info, X86fminc,
- SchedWriteFCmp.Scl, "VMINCSD">, XD,
+ SchedWriteFCmp.Scl, "VMINCSD">, TB, XD,
REX_W, EVEX, VVVV, VEX_LIG,
EVEX_CD8<64, CD8VT1>, SIMD_EXC;
defm VMAXCSSZ : avx512_comutable_binop_s<0x5F, "vmaxss", f32x_info, X86fmaxc,
- SchedWriteFCmp.Scl, "VMAXCSS">, XS,
+ SchedWriteFCmp.Scl, "VMAXCSS">, TB, XS,
EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>, SIMD_EXC;
defm VMAXCSDZ : avx512_comutable_binop_s<0x5F, "vmaxsd", f64x_info, X86fmaxc,
- SchedWriteFCmp.Scl, "VMAXCSD">, XD,
+ SchedWriteFCmp.Scl, "VMAXCSD">, TB, XD,
REX_W, EVEX, VVVV, VEX_LIG,
EVEX_CD8<64, CD8VT1>, SIMD_EXC;
defm VMINCSHZ : avx512_comutable_binop_s<0x5D, "vminsh", f16x_info, X86fminc,
- SchedWriteFCmp.Scl, "VMINCSH">, T_MAP5XS,
+ SchedWriteFCmp.Scl, "VMINCSH">, T_MAP5, XS,
EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>, SIMD_EXC,
NotEVEX2VEXConvertible;
defm VMAXCSHZ : avx512_comutable_binop_s<0x5F, "vmaxsh", f16x_info, X86fmaxc,
- SchedWriteFCmp.Scl, "VMAXCSH">, T_MAP5XS,
+ SchedWriteFCmp.Scl, "VMAXCSH">, T_MAP5, XS,
EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>, SIMD_EXC,
NotEVEX2VEXConvertible;
@@ -5607,27 +5607,27 @@ multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDPatternOperator Op
bit IsPD128Commutable = IsCommutable> {
let Predicates = [prd] in {
defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v16f32_info,
- sched.PS.ZMM, IsCommutable>, EVEX_V512, PS,
+ sched.PS.ZMM, IsCommutable>, EVEX_V512, TB, PS,
EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v8f64_info,
- sched.PD.ZMM, IsCommutable>, EVEX_V512, PD, REX_W,
+ sched.PD.ZMM, IsCommutable>, EVEX_V512, TB, PD, REX_W,
EVEX_CD8<64, CD8VF>;
}
// Define only if AVX512VL feature is present.
let Predicates = [prd, HasVLX] in {
defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v4f32x_info,
- sched.PS.XMM, IsCommutable>, EVEX_V128, PS,
+ sched.PS.XMM, IsCommutable>, EVEX_V128, TB, PS,
EVEX_CD8<32, CD8VF>;
defm PSZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v8f32x_info,
- sched.PS.YMM, IsCommutable>, EVEX_V256, PS,
+ sched.PS.YMM, IsCommutable>, EVEX_V256, TB, PS,
EVEX_CD8<32, CD8VF>;
defm PDZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v2f64x_info,
sched.PD.XMM, IsPD128Commutable,
- IsCommutable>, EVEX_V128, PD, REX_W,
+ IsCommutable>, EVEX_V128, TB, PD, REX_W,
EVEX_CD8<64, CD8VF>;
defm PDZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v4f64x_info,
- sched.PD.YMM, IsCommutable>, EVEX_V256, PD, REX_W,
+ sched.PD.YMM, IsCommutable>, EVEX_V256, TB, PD, REX_W,
EVEX_CD8<64, CD8VF>;
}
}
@@ -5637,15 +5637,15 @@ multiclass avx512_fp_binop_ph<bits<8> opc, string OpcodeStr, SDPatternOperator O
X86SchedWriteSizes sched, bit IsCommutable = 0> {
let Predicates = [HasFP16] in {
defm PHZ : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v32f16_info,
- sched.PH.ZMM, IsCommutable>, EVEX_V512, T_MAP5PS,
+ sched.PH.ZMM, IsCommutable>, EVEX_V512, T_MAP5, PS,
EVEX_CD8<16, CD8VF>;
}
let Predicates = [HasVLX, HasFP16] in {
defm PHZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v8f16x_info,
- sched.PH.XMM, IsCommutable>, EVEX_V128, T_MAP5PS,
+ sched.PH.XMM, IsCommutable>, EVEX_V128, T_MAP5, PS,
EVEX_CD8<16, CD8VF>;
defm PHZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v16f16x_info,
- sched.PH.YMM, IsCommutable>, EVEX_V256, T_MAP5PS,
+ sched.PH.YMM, IsCommutable>, EVEX_V256, T_MAP5, PS,
EVEX_CD8<16, CD8VF>;
}
}
@@ -5656,14 +5656,14 @@ multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeR
let Predicates = [HasFP16] in {
defm PHZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.PH.ZMM,
v32f16_info>,
- EVEX_V512, T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
}
defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.PS.ZMM,
v16f32_info>,
- EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.PD.ZMM,
v8f64_info>,
- EVEX_V512, PD, REX_W,EVEX_CD8<64, CD8VF>;
+ EVEX_V512, TB, PD, REX_W,EVEX_CD8<64, CD8VF>;
}
let Uses = [MXCSR] in
@@ -5672,14 +5672,14 @@ multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd
let Predicates = [HasFP16] in {
defm PHZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, sched.PH.ZMM,
v32f16_info>,
- EVEX_V512, T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
}
defm PSZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, sched.PS.ZMM,
v16f32_info>,
- EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, sched.PD.ZMM,
v8f64_info>,
- EVEX_V512, PD, REX_W,EVEX_CD8<64, CD8VF>;
+ EVEX_V512, TB, PD, REX_W,EVEX_CD8<64, CD8VF>;
}
defm VADD : avx512_fp_binop_p<0x58, "vadd", any_fadd, fadd, HasAVX512,
@@ -5770,43 +5770,43 @@ multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr
let Predicates = [HasFP16] in {
defm PHZ : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.ZMM, v32f16_info>,
avx512_fp_round_packed<opc, OpcodeStr, X86scalefRnd, sched.ZMM, v32f16_info>,
- EVEX_V512, T_MAP6PD, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP6, PD, EVEX_CD8<16, CD8VF>;
defm SHZ : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, X86scalefs, sched.Scl, f16x_info>,
avx512_fp_scalar_round<opcScaler, OpcodeStr#"sh", f16x_info, X86scalefsRnd, sched.Scl>,
- EVEX, VVVV, T_MAP6PD, EVEX_CD8<16, CD8VT1>;
+ EVEX, VVVV, T_MAP6, PD, EVEX_CD8<16, CD8VT1>;
}
defm PSZ : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.ZMM, v16f32_info>,
avx512_fp_round_packed<opc, OpcodeStr, X86scalefRnd, sched.ZMM, v16f32_info>,
- EVEX_V512, EVEX_CD8<32, CD8VF>, T8PD;
+ EVEX_V512, EVEX_CD8<32, CD8VF>, T8, PD;
defm PDZ : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.ZMM, v8f64_info>,
avx512_fp_round_packed<opc, OpcodeStr, X86scalefRnd, sched.ZMM, v8f64_info>,
- EVEX_V512, REX_W, EVEX_CD8<64, CD8VF>, T8PD;
+ EVEX_V512, REX_W, EVEX_CD8<64, CD8VF>, T8, PD;
defm SSZ : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, X86scalefs, sched.Scl, f32x_info>,
avx512_fp_scalar_round<opcScaler, OpcodeStr#"ss", f32x_info,
X86scalefsRnd, sched.Scl>,
- EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>, T8PD;
+ EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>, T8, PD;
defm SDZ : avx512_fp_scalef_scalar<opcScaler, OpcodeStr, X86scalefs, sched.Scl, f64x_info>,
avx512_fp_scalar_round<opcScaler, OpcodeStr#"sd", f64x_info,
X86scalefsRnd, sched.Scl>,
- EVEX, VVVV, VEX_LIG, EVEX_CD8<64, CD8VT1>, REX_W, T8PD;
+ EVEX, VVVV, VEX_LIG, EVEX_CD8<64, CD8VT1>, REX_W, T8, PD;
// Define only if AVX512VL feature is present.
let Predicates = [HasVLX] in {
defm PSZ128 : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.XMM, v4f32x_info>,
- EVEX_V128, EVEX_CD8<32, CD8VF>, T8PD;
+ EVEX_V128, EVEX_CD8<32, CD8VF>, T8, PD;
defm PSZ256 : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.YMM, v8f32x_info>,
- EVEX_V256, EVEX_CD8<32, CD8VF>, T8PD;
+ EVEX_V256, EVEX_CD8<32, CD8VF>, T8, PD;
defm PDZ128 : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.XMM, v2f64x_info>,
- EVEX_V128, REX_W, EVEX_CD8<64, CD8VF>, T8PD;
+ EVEX_V128, REX_W, EVEX_CD8<64, CD8VF>, T8, PD;
defm PDZ256 : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.YMM, v4f64x_info>,
- EVEX_V256, REX_W, EVEX_CD8<64, CD8VF>, T8PD;
+ EVEX_V256, REX_W, EVEX_CD8<64, CD8VF>, T8, PD;
}
let Predicates = [HasFP16, HasVLX] in {
defm PHZ128 : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.XMM, v8f16x_info>,
- EVEX_V128, EVEX_CD8<16, CD8VF>, T_MAP6PD;
+ EVEX_V128, EVEX_CD8<16, CD8VF>, T_MAP6, PD;
defm PHZ256 : avx512_fp_scalef_p<opc, OpcodeStr, X86scalef, sched.YMM, v16f16x_info>,
- EVEX_V256, EVEX_CD8<16, CD8VF>, T_MAP6PD;
+ EVEX_V256, EVEX_CD8<16, CD8VF>, T_MAP6, PD;
}
}
defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef",
@@ -5898,9 +5898,9 @@ multiclass avx512_vptest_all_forms<bits<8> opc_wb, bits<8> opc_dq, string Opcode
avx512_vptest_dq<opc_dq, OpcodeStr, sched>;
defm VPTESTM : avx512_vptest_all_forms<0x26, 0x27, "vptestm",
- SchedWriteVecLogic>, T8PD;
+ SchedWriteVecLogic>, T8, PD;
defm VPTESTNM : avx512_vptest_all_forms<0x26, 0x27, "vptestnm",
- SchedWriteVecLogic>, T8XS;
+ SchedWriteVecLogic>, T8, XS;
//===----------------------------------------------------------------------===//
// AVX-512 Shift instructions
@@ -6374,14 +6374,14 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode _.RC:$src1,
(Ctrl.VT Ctrl.RC:$src2)))>,
- T8PD, EVEX, VVVV, Sched<[sched]>;
+ T8, PD, EVEX, VVVV, Sched<[sched]>;
defm rm: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, Ctrl.MemOp:$src2), OpcodeStr,
"$src2, $src1", "$src1, $src2",
(_.VT (OpNode
_.RC:$src1,
(Ctrl.VT (Ctrl.LdFrag addr:$src2))))>,
- T8PD, EVEX, VVVV, EVEX_CD8<_.EltSize, CD8VF>,
+ T8, PD, EVEX, VVVV, EVEX_CD8<_.EltSize, CD8VF>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
defm rmb: AVX512_maskable<OpcVar, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr,
@@ -6390,7 +6390,7 @@ multiclass avx512_permil_vec<bits<8> OpcVar, string OpcodeStr, SDNode OpNode,
(_.VT (OpNode
_.RC:$src1,
(Ctrl.VT (Ctrl.BroadcastLdFrag addr:$src2))))>,
- T8PD, EVEX, VVVV, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
+ T8, PD, EVEX, VVVV, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
@@ -6500,13 +6500,13 @@ multiclass avx512_mov_hilo_packed<bits<8> opc, string OpcodeStr,
// No patterns for MOVLPS/MOVHPS as the Movlhps node should only be created in
// SSE1. And MOVLPS pattern is even more complex.
defm VMOVHPSZ128 : avx512_mov_hilo_packed<0x16, "vmovhps", null_frag,
- v4f32x_info>, EVEX_CD8<32, CD8VT2>, PS;
+ v4f32x_info>, EVEX_CD8<32, CD8VT2>, TB, PS;
defm VMOVHPDZ128 : avx512_mov_hilo_packed<0x16, "vmovhpd", X86Unpckl,
- v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, REX_W;
+ v2f64x_info>, EVEX_CD8<64, CD8VT1>, TB, PD, REX_W;
defm VMOVLPSZ128 : avx512_mov_hilo_packed<0x12, "vmovlps", null_frag,
- v4f32x_info>, EVEX_CD8<32, CD8VT2>, PS;
+ v4f32x_info>, EVEX_CD8<32, CD8VT2>, TB, PS;
defm VMOVLPDZ128 : avx512_mov_hilo_packed<0x12, "vmovlpd", X86Movsd,
- v2f64x_info>, EVEX_CD8<64, CD8VT1>, PD, REX_W;
+ v2f64x_info>, EVEX_CD8<64, CD8VT1>, TB, PD, REX_W;
let Predicates = [HasAVX512] in {
// VMOVHPD patterns
@@ -6627,13 +6627,13 @@ multiclass avx512_fma3p_213_f<bits<8> opc, string OpcodeStr, SDPatternOperator O
SDNode MaskOpNode, SDNode OpNodeRnd> {
defm PH : avx512_fma3p_213_common<opc, OpcodeStr#"ph", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f16_info, HasFP16>, T_MAP6PD;
+ avx512vl_f16_info, HasFP16>, T_MAP6, PD;
defm PS : avx512_fma3p_213_common<opc, OpcodeStr#"ps", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f32_info>, T8PD;
+ avx512vl_f32_info>, T8, PD;
defm PD : avx512_fma3p_213_common<opc, OpcodeStr#"pd", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f64_info>, T8PD, REX_W;
+ avx512vl_f64_info>, T8, PD, REX_W;
}
defm VFMADD213 : avx512_fma3p_213_f<0xA8, "vfmadd213", any_fma,
@@ -6724,13 +6724,13 @@ multiclass avx512_fma3p_231_f<bits<8> opc, string OpcodeStr, SDPatternOperator O
SDNode MaskOpNode, SDNode OpNodeRnd > {
defm PH : avx512_fma3p_231_common<opc, OpcodeStr#"ph", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f16_info, HasFP16>, T_MAP6PD;
+ avx512vl_f16_info, HasFP16>, T_MAP6, PD;
defm PS : avx512_fma3p_231_common<opc, OpcodeStr#"ps", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f32_info>, T8PD;
+ avx512vl_f32_info>, T8, PD;
defm PD : avx512_fma3p_231_common<opc, OpcodeStr#"pd", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f64_info>, T8PD, REX_W;
+ avx512vl_f64_info>, T8, PD, REX_W;
}
defm VFMADD231 : avx512_fma3p_231_f<0xB8, "vfmadd231", any_fma,
@@ -6822,13 +6822,13 @@ multiclass avx512_fma3p_132_f<bits<8> opc, string OpcodeStr, SDPatternOperator O
SDNode MaskOpNode, SDNode OpNodeRnd > {
defm PH : avx512_fma3p_132_common<opc, OpcodeStr#"ph", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f16_info, HasFP16>, T_MAP6PD;
+ avx512vl_f16_info, HasFP16>, T_MAP6, PD;
defm PS : avx512_fma3p_132_common<opc, OpcodeStr#"ps", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f32_info>, T8PD;
+ avx512vl_f32_info>, T8, PD;
defm PD : avx512_fma3p_132_common<opc, OpcodeStr#"pd", OpNode, MaskOpNode,
OpNodeRnd, SchedWriteFMA,
- avx512vl_f64_info>, T8PD, REX_W;
+ avx512vl_f64_info>, T8, PD, REX_W;
}
defm VFMADD132 : avx512_fma3p_132_f<0x98, "vfmadd132", any_fma,
@@ -6929,15 +6929,15 @@ multiclass avx512_fma3s<bits<8> opc213, bits<8> opc231, bits<8> opc132,
let Predicates = [HasAVX512] in {
defm NAME : avx512_fma3s_all<opc213, opc231, opc132, OpcodeStr, OpNode,
OpNodeRnd, f32x_info, "SS">,
- EVEX_CD8<32, CD8VT1>, VEX_LIG, T8PD;
+ EVEX_CD8<32, CD8VT1>, VEX_LIG, T8, PD;
defm NAME : avx512_fma3s_all<opc213, opc231, opc132, OpcodeStr, OpNode,
OpNodeRnd, f64x_info, "SD">,
- EVEX_CD8<64, CD8VT1>, VEX_LIG, REX_W, T8PD;
+ EVEX_CD8<64, CD8VT1>, VEX_LIG, REX_W, T8, PD;
}
let Predicates = [HasFP16] in {
defm NAME : avx512_fma3s_all<opc213, opc231, opc132, OpcodeStr, OpNode,
OpNodeRnd, f16x_info, "SH">,
- EVEX_CD8<16, CD8VT1>, VEX_LIG, T_MAP6PD;
+ EVEX_CD8<16, CD8VT1>, VEX_LIG, T_MAP6, PD;
}
}
@@ -7189,13 +7189,13 @@ multiclass avx512_pmadd52_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(ins _.RC:$src2, _.RC:$src3),
OpcodeStr, "$src3, $src2", "$src2, $src3",
(_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)), 1, 1>,
- T8PD, EVEX, VVVV, Sched<[sched]>;
+ T8, PD, EVEX, VVVV, Sched<[sched]>;
defm m: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src2, _.MemOp:$src3),
OpcodeStr, "$src3, $src2", "$src2, $src3",
(_.VT (OpNode _.RC:$src2, (_.LdFrag addr:$src3), _.RC:$src1))>,
- T8PD, EVEX, VVVV, Sched<[sched.Folded, sched.ReadAfterFold,
+ T8, PD, EVEX, VVVV, Sched<[sched.Folded, sched.ReadAfterFold,
sched.ReadAfterFold]>;
defm mb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
@@ -7205,7 +7205,7 @@ multiclass avx512_pmadd52_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(OpNode _.RC:$src2,
(_.VT (_.BroadcastLdFrag addr:$src3)),
_.RC:$src1)>,
- T8PD, EVEX, VVVV, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold,
+ T8, PD, EVEX, VVVV, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold,
sched.ReadAfterFold]>;
}
}
@@ -7307,18 +7307,18 @@ let Predicates = [HasAVX512] in {
defm VCVTSI2SSZ : avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd,
WriteCvtI2SS, GR32,
v4f32x_info, i32mem, loadi32, "cvtsi2ss", "l">,
- XS, EVEX_CD8<32, CD8VT1>;
+ TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSI642SSZ: avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd,
WriteCvtI2SS, GR64,
v4f32x_info, i64mem, loadi64, "cvtsi2ss", "q">,
- XS, REX_W, EVEX_CD8<64, CD8VT1>;
+ TB, XS, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, null_frag, WriteCvtI2SD, GR32,
v2f64x_info, i32mem, loadi32, "cvtsi2sd", "l", [], 0>,
- XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+ TB, XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTSI642SDZ: avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd,
WriteCvtI2SD, GR64,
v2f64x_info, i64mem, loadi64, "cvtsi2sd", "q">,
- XD, REX_W, EVEX_CD8<64, CD8VT1>;
+ TB, XD, REX_W, EVEX_CD8<64, CD8VT1>;
def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
(VCVTSI2SSZrm_Int VR128X:$dst, VR128X:$src1, i32mem:$src), 0, "att">;
@@ -7346,18 +7346,18 @@ def : Pat<(f64 (any_sint_to_fp GR64:$src)),
defm VCVTUSI2SSZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd,
WriteCvtI2SS, GR32,
v4f32x_info, i32mem, loadi32,
- "cvtusi2ss", "l">, XS, EVEX_CD8<32, CD8VT1>;
+ "cvtusi2ss", "l">, TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTUSI642SSZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd,
WriteCvtI2SS, GR64,
v4f32x_info, i64mem, loadi64, "cvtusi2ss", "q">,
- XS, REX_W, EVEX_CD8<64, CD8VT1>;
+ TB, XS, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, null_frag, WriteCvtI2SD, GR32, v2f64x_info,
i32mem, loadi32, "cvtusi2sd", "l", [], 0>,
- XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
+ TB, XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm VCVTUSI642SDZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd,
WriteCvtI2SD, GR64,
v2f64x_info, i64mem, loadi64, "cvtusi2sd", "q">,
- XD, REX_W, EVEX_CD8<64, CD8VT1>;
+ TB, XD, REX_W, EVEX_CD8<64, CD8VT1>;
def : InstAlias<"vcvtusi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
(VCVTUSI2SSZrm_Int VR128X:$dst, VR128X:$src1, i32mem:$src), 0, "att">;
@@ -7422,28 +7422,28 @@ multiclass avx512_cvt_s_int_round<bits<8> opc, X86VectorVTInfo SrcVT,
// Convert float/double to signed/unsigned int 32/64
defm VCVTSS2SIZ: avx512_cvt_s_int_round<0x2D, f32x_info, i32x_info,X86cvts2si,
X86cvts2siRnd, WriteCvtSS2I, "cvtss2si", "{l}">,
- XS, EVEX_CD8<32, CD8VT1>;
+ TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2SI64Z: avx512_cvt_s_int_round<0x2D, f32x_info, i64x_info, X86cvts2si,
X86cvts2siRnd, WriteCvtSS2I, "cvtss2si", "{q}">,
- XS, REX_W, EVEX_CD8<32, CD8VT1>;
+ TB, XS, REX_W, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USIZ: avx512_cvt_s_int_round<0x79, f32x_info, i32x_info, X86cvts2usi,
X86cvts2usiRnd, WriteCvtSS2I, "cvtss2usi", "{l}">,
- XS, EVEX_CD8<32, CD8VT1>;
+ TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2USI64Z: avx512_cvt_s_int_round<0x79, f32x_info, i64x_info, X86cvts2usi,
X86cvts2usiRnd, WriteCvtSS2I, "cvtss2usi", "{q}">,
- XS, REX_W, EVEX_CD8<32, CD8VT1>;
+ TB, XS, REX_W, EVEX_CD8<32, CD8VT1>;
defm VCVTSD2SIZ: avx512_cvt_s_int_round<0x2D, f64x_info, i32x_info, X86cvts2si,
X86cvts2siRnd, WriteCvtSD2I, "cvtsd2si", "{l}">,
- XD, EVEX_CD8<64, CD8VT1>;
+ TB, XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2SI64Z: avx512_cvt_s_int_round<0x2D, f64x_info, i64x_info, X86cvts2si,
X86cvts2siRnd, WriteCvtSD2I, "cvtsd2si", "{q}">,
- XD, REX_W, EVEX_CD8<64, CD8VT1>;
+ TB, XD, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USIZ: avx512_cvt_s_int_round<0x79, f64x_info, i32x_info, X86cvts2usi,
X86cvts2usiRnd, WriteCvtSD2I, "cvtsd2usi", "{l}">,
- XD, EVEX_CD8<64, CD8VT1>;
+ TB, XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2USI64Z: avx512_cvt_s_int_round<0x79, f64x_info, i64x_info, X86cvts2usi,
X86cvts2usiRnd, WriteCvtSD2I, "cvtsd2usi", "{q}">,
- XD, REX_W, EVEX_CD8<64, CD8VT1>;
+ TB, XD, REX_W, EVEX_CD8<64, CD8VT1>;
multiclass avx512_cvt_s<bits<8> opc, string asm, X86VectorVTInfo SrcVT,
X86VectorVTInfo DstVT, SDNode OpNode,
@@ -7463,13 +7463,13 @@ multiclass avx512_cvt_s<bits<8> opc, string asm, X86VectorVTInfo SrcVT,
}
defm VCVTSS2SIZ: avx512_cvt_s<0x2D, "vcvtss2si", f32x_info, i32x_info,
- lrint, WriteCvtSS2I>, XS, EVEX_CD8<32, CD8VT1>;
+ lrint, WriteCvtSS2I>, TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSS2SI64Z: avx512_cvt_s<0x2D, "vcvtss2si", f32x_info, i64x_info,
- llrint, WriteCvtSS2I>, REX_W, XS, EVEX_CD8<32, CD8VT1>;
+ llrint, WriteCvtSS2I>, REX_W, TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSD2SIZ: avx512_cvt_s<0x2D, "vcvtsd2si", f64x_info, i32x_info,
- lrint, WriteCvtSD2I>, XD, EVEX_CD8<64, CD8VT1>;
+ lrint, WriteCvtSD2I>, TB, XD, EVEX_CD8<64, CD8VT1>;
defm VCVTSD2SI64Z: avx512_cvt_s<0x2D, "vcvtsd2si", f64x_info, i64x_info,
- llrint, WriteCvtSD2I>, REX_W, XD, EVEX_CD8<64, CD8VT1>;
+ llrint, WriteCvtSD2I>, REX_W, TB, XD, EVEX_CD8<64, CD8VT1>;
let Predicates = [HasAVX512] in {
def : Pat<(i64 (lrint FR32:$src)), (VCVTSS2SI64Zrr FR32:$src)>;
@@ -7609,29 +7609,29 @@ let Predicates = [prd], ExeDomain = _SrcRC.ExeDomain in {
defm VCVTTSS2SIZ: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i32x_info,
any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSS2I,
- "{l}">, XS, EVEX_CD8<32, CD8VT1>;
+ "{l}">, TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2SI64Z: avx512_cvt_s_all<0x2C, "vcvttss2si", f32x_info, i64x_info,
any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSS2I,
- "{q}">, REX_W, XS, EVEX_CD8<32, CD8VT1>;
+ "{q}">, REX_W, TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTTSD2SIZ: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i32x_info,
any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSD2I,
- "{l}">, XD, EVEX_CD8<64, CD8VT1>;
+ "{l}">, TB, XD, EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2SI64Z: avx512_cvt_s_all<0x2C, "vcvttsd2si", f64x_info, i64x_info,
any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSD2I,
- "{q}">, REX_W, XD, EVEX_CD8<64, CD8VT1>;
+ "{q}">, REX_W, TB, XD, EVEX_CD8<64, CD8VT1>;
defm VCVTTSS2USIZ: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i32x_info,
any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I,
- "{l}">, XS, EVEX_CD8<32, CD8VT1>;
+ "{l}">, TB, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTTSS2USI64Z: avx512_cvt_s_all<0x78, "vcvttss2usi", f32x_info, i64x_info,
any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I,
- "{q}">, XS,REX_W, EVEX_CD8<32, CD8VT1>;
+ "{q}">, TB, XS,REX_W, EVEX_CD8<32, CD8VT1>;
defm VCVTTSD2USIZ: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i32x_info,
any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSD2I,
- "{l}">, XD, EVEX_CD8<64, CD8VT1>;
+ "{l}">, TB, XD, EVEX_CD8<64, CD8VT1>;
defm VCVTTSD2USI64Z: avx512_cvt_s_all<0x78, "vcvttsd2usi", f64x_info, i64x_info,
any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSD2I,
- "{q}">, XD, REX_W, EVEX_CD8<64, CD8VT1>;
+ "{q}">, TB, XD, REX_W, EVEX_CD8<64, CD8VT1>;
//===----------------------------------------------------------------------===//
// AVX-512 Convert form float to double and back
@@ -7719,22 +7719,22 @@ multiclass avx512_cvt_fp_scalar_extend<bits<8> opc, string OpcodeStr,
}
defm VCVTSD2SS : avx512_cvt_fp_scalar_trunc<0x5A, "vcvtsd2ss", X86frounds,
X86froundsRnd, WriteCvtSD2SS, f64x_info,
- f32x_info>, XD, REX_W;
+ f32x_info>, TB, XD, REX_W;
defm VCVTSS2SD : avx512_cvt_fp_scalar_extend<0x5A, "vcvtss2sd", X86fpexts,
X86fpextsSAE, WriteCvtSS2SD, f32x_info,
- f64x_info>, XS;
+ f64x_info>, TB, XS;
defm VCVTSD2SH : avx512_cvt_fp_scalar_trunc<0x5A, "vcvtsd2sh", X86frounds,
X86froundsRnd, WriteCvtSD2SS, f64x_info,
- f16x_info, HasFP16>, T_MAP5XD, REX_W;
+ f16x_info, HasFP16>, T_MAP5, XD, REX_W;
defm VCVTSH2SD : avx512_cvt_fp_scalar_extend<0x5A, "vcvtsh2sd", X86fpexts,
X86fpextsSAE, WriteCvtSS2SD, f16x_info,
- f64x_info, HasFP16>, T_MAP5XS;
+ f64x_info, HasFP16>, T_MAP5, XS;
defm VCVTSS2SH : avx512_cvt_fp_scalar_trunc<0x1D, "vcvtss2sh", X86frounds,
X86froundsRnd, WriteCvtSD2SS, f32x_info,
- f16x_info, HasFP16>, T_MAP5PS;
+ f16x_info, HasFP16>, T_MAP5, PS;
defm VCVTSH2SS : avx512_cvt_fp_scalar_extend<0x13, "vcvtsh2ss", X86fpexts,
X86fpextsSAE, WriteCvtSS2SD, f16x_info,
- f32x_info, HasFP16>, T_MAP6PS;
+ f32x_info, HasFP16>, T_MAP6, PS;
def : Pat<(f64 (any_fpextend FR32X:$src)),
(VCVTSS2SDZrr (f64 (IMPLICIT_DEF)), FR32X:$src)>,
@@ -7996,10 +7996,10 @@ multiclass avx512_cvt_trunc<bits<8> opc, string OpcodeStr,
defm VCVTPD2PS : avx512_cvt_trunc<0x5A, "vcvtpd2ps",
avx512vl_f32_info, avx512vl_f64_info, SchedWriteCvtPD2PS>,
- REX_W, PD, EVEX_CD8<64, CD8VF>;
+ REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
defm VCVTPS2PD : avx512_cvt_extend<0x5A, "vcvtps2pd",
avx512vl_f64_info, avx512vl_f32_info, SchedWriteCvtPS2PD>,
- PS, EVEX_CD8<32, CD8VH>;
+ TB, PS, EVEX_CD8<32, CD8VH>;
// Extend Half to Double
multiclass avx512_cvtph2pd<bits<8> opc, string OpcodeStr,
@@ -8108,14 +8108,14 @@ multiclass avx512_cvtpd2ph<bits<8> opc, string OpcodeStr, X86SchedWriteWidths sc
defm VCVTPS2PHX : avx512_cvt_trunc<0x1D, "vcvtps2phx", avx512vl_f16_info,
avx512vl_f32_info, SchedWriteCvtPD2PS,
- HasFP16>, T_MAP5PD, EVEX_CD8<32, CD8VF>;
+ HasFP16>, T_MAP5, PD, EVEX_CD8<32, CD8VF>;
defm VCVTPH2PSX : avx512_cvt_extend<0x13, "vcvtph2psx", avx512vl_f32_info,
avx512vl_f16_info, SchedWriteCvtPS2PD,
- HasFP16>, T_MAP6PD, EVEX_CD8<16, CD8VH>;
+ HasFP16>, T_MAP6, PD, EVEX_CD8<16, CD8VH>;
defm VCVTPD2PH : avx512_cvtpd2ph<0x5A, "vcvtpd2ph", SchedWriteCvtPD2PS>,
- REX_W, T_MAP5PD, EVEX_CD8<64, CD8VF>;
+ REX_W, T_MAP5, PD, EVEX_CD8<64, CD8VF>;
defm VCVTPH2PD : avx512_cvtph2pd<0x5A, "vcvtph2pd", SchedWriteCvtPS2PD>,
- T_MAP5PS, EVEX_CD8<16, CD8VQ>;
+ T_MAP5, PS, EVEX_CD8<16, CD8VQ>;
let Predicates = [HasFP16, HasVLX] in {
// Special patterns to allow use of X86vmfpround for masking. Instruction
@@ -8596,120 +8596,120 @@ multiclass avx512_cvtqq2ps_dq2ph<bits<8> opc, string OpcodeStr, SDPatternOperato
defm VCVTDQ2PD : avx512_cvtdq2pd<0xE6, "vcvtdq2pd", any_sint_to_fp, sint_to_fp,
X86any_VSintToFP, X86VSintToFP,
- SchedWriteCvtDQ2PD>, XS, EVEX_CD8<32, CD8VH>;
+ SchedWriteCvtDQ2PD>, TB, XS, EVEX_CD8<32, CD8VH>;
defm VCVTDQ2PS : avx512_cvtdq2ps<0x5B, "vcvtdq2ps", any_sint_to_fp, sint_to_fp,
X86VSintToFpRnd, SchedWriteCvtDQ2PS>,
- PS, EVEX_CD8<32, CD8VF>;
+ TB, PS, EVEX_CD8<32, CD8VF>;
defm VCVTTPS2DQ : avx512_cvttps2dq<0x5B, "vcvttps2dq", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
- SchedWriteCvtPS2DQ>, XS, EVEX_CD8<32, CD8VF>;
+ SchedWriteCvtPS2DQ>, TB, XS, EVEX_CD8<32, CD8VF>;
defm VCVTTPD2DQ : avx512_cvttpd2dq<0xE6, "vcvttpd2dq", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
SchedWriteCvtPD2DQ>,
- PD, REX_W, EVEX_CD8<64, CD8VF>;
+ TB, PD, REX_W, EVEX_CD8<64, CD8VF>;
defm VCVTTPS2UDQ : avx512_cvttps2dq<0x78, "vcvttps2udq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
- SchedWriteCvtPS2DQ>, PS, EVEX_CD8<32, CD8VF>;
+ SchedWriteCvtPS2DQ>, TB, PS, EVEX_CD8<32, CD8VF>;
defm VCVTTPD2UDQ : avx512_cvttpd2dq<0x78, "vcvttpd2udq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
SchedWriteCvtPD2DQ>,
- PS, REX_W, EVEX_CD8<64, CD8VF>;
+ TB, PS, REX_W, EVEX_CD8<64, CD8VF>;
defm VCVTUDQ2PD : avx512_cvtdq2pd<0x7A, "vcvtudq2pd", any_uint_to_fp,
uint_to_fp, X86any_VUintToFP, X86VUintToFP,
- SchedWriteCvtDQ2PD>, XS, EVEX_CD8<32, CD8VH>;
+ SchedWriteCvtDQ2PD>, TB, XS, EVEX_CD8<32, CD8VH>;
defm VCVTUDQ2PS : avx512_cvtdq2ps<0x7A, "vcvtudq2ps", any_uint_to_fp,
uint_to_fp, X86VUintToFpRnd,
- SchedWriteCvtDQ2PS>, XD, EVEX_CD8<32, CD8VF>;
+ SchedWriteCvtDQ2PS>, TB, XD, EVEX_CD8<32, CD8VF>;
defm VCVTPS2DQ : avx512_cvtps2dq<0x5B, "vcvtps2dq", X86cvtp2Int, X86cvtp2Int,
- X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, PD,
+ X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, TB, PD,
EVEX_CD8<32, CD8VF>;
defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtp2Int, X86cvtp2Int,
- X86cvtp2IntRnd, SchedWriteCvtPD2DQ>, XD,
+ X86cvtp2IntRnd, SchedWriteCvtPD2DQ>, TB, XD,
REX_W, EVEX_CD8<64, CD8VF>;
defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtp2UInt, X86cvtp2UInt,
X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>,
- PS, EVEX_CD8<32, CD8VF>;
+ TB, PS, EVEX_CD8<32, CD8VF>;
defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtp2UInt, X86cvtp2UInt,
X86cvtp2UIntRnd, SchedWriteCvtPD2DQ>, REX_W,
- PS, EVEX_CD8<64, CD8VF>;
+ TB, PS, EVEX_CD8<64, CD8VF>;
defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtp2Int, X86cvtp2Int,
X86cvtp2IntRnd, SchedWriteCvtPD2DQ>, REX_W,
- PD, EVEX_CD8<64, CD8VF>;
+ TB, PD, EVEX_CD8<64, CD8VF>;
defm VCVTPS2QQ : avx512_cvtps2qq<0x7B, "vcvtps2qq", X86cvtp2Int, X86cvtp2Int,
- X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, PD,
+ X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, TB, PD,
EVEX_CD8<32, CD8VH>;
defm VCVTPD2UQQ : avx512_cvtpd2qq<0x79, "vcvtpd2uqq", X86cvtp2UInt, X86cvtp2UInt,
X86cvtp2UIntRnd, SchedWriteCvtPD2DQ>, REX_W,
- PD, EVEX_CD8<64, CD8VF>;
+ TB, PD, EVEX_CD8<64, CD8VF>;
defm VCVTPS2UQQ : avx512_cvtps2qq<0x79, "vcvtps2uqq", X86cvtp2UInt, X86cvtp2UInt,
- X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, PD,
+ X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, TB, PD,
EVEX_CD8<32, CD8VH>;
defm VCVTTPD2QQ : avx512_cvttpd2qq<0x7A, "vcvttpd2qq", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
SchedWriteCvtPD2DQ>, REX_W,
- PD, EVEX_CD8<64, CD8VF>;
+ TB, PD, EVEX_CD8<64, CD8VF>;
defm VCVTTPS2QQ : avx512_cvttps2qq<0x7A, "vcvttps2qq", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
- SchedWriteCvtPS2DQ>, PD,
+ SchedWriteCvtPS2DQ>, TB, PD,
EVEX_CD8<32, CD8VH>;
defm VCVTTPD2UQQ : avx512_cvttpd2qq<0x78, "vcvttpd2uqq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
SchedWriteCvtPD2DQ>, REX_W,
- PD, EVEX_CD8<64, CD8VF>;
+ TB, PD, EVEX_CD8<64, CD8VF>;
defm VCVTTPS2UQQ : avx512_cvttps2qq<0x78, "vcvttps2uqq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
- SchedWriteCvtPS2DQ>, PD,
+ SchedWriteCvtPS2DQ>, TB, PD,
EVEX_CD8<32, CD8VH>;
defm VCVTQQ2PD : avx512_cvtqq2pd<0xE6, "vcvtqq2pd", any_sint_to_fp,
sint_to_fp, X86VSintToFpRnd,
- SchedWriteCvtDQ2PD>, REX_W, XS, EVEX_CD8<64, CD8VF>;
+ SchedWriteCvtDQ2PD>, REX_W, TB, XS, EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PD : avx512_cvtqq2pd<0x7A, "vcvtuqq2pd", any_uint_to_fp,
uint_to_fp, X86VUintToFpRnd, SchedWriteCvtDQ2PD>,
- REX_W, XS, EVEX_CD8<64, CD8VF>;
+ REX_W, TB, XS, EVEX_CD8<64, CD8VF>;
defm VCVTDQ2PH : avx512_cvtqq2ps_dq2ph<0x5B, "vcvtdq2ph", any_sint_to_fp, sint_to_fp,
X86any_VSintToFP, X86VMSintToFP,
X86VSintToFpRnd, avx512vl_f16_info, avx512vl_i32_info,
SchedWriteCvtDQ2PS, HasFP16>,
- T_MAP5PS, EVEX_CD8<32, CD8VF>;
+ T_MAP5, PS, EVEX_CD8<32, CD8VF>;
defm VCVTUDQ2PH : avx512_cvtqq2ps_dq2ph<0x7A, "vcvtudq2ph", any_uint_to_fp, uint_to_fp,
X86any_VUintToFP, X86VMUintToFP,
X86VUintToFpRnd, avx512vl_f16_info, avx512vl_i32_info,
- SchedWriteCvtDQ2PS, HasFP16>, T_MAP5XD,
+ SchedWriteCvtDQ2PS, HasFP16>, T_MAP5, XD,
EVEX_CD8<32, CD8VF>;
defm VCVTQQ2PS : avx512_cvtqq2ps_dq2ph<0x5B, "vcvtqq2ps", any_sint_to_fp, sint_to_fp,
X86any_VSintToFP, X86VMSintToFP,
X86VSintToFpRnd, avx512vl_f32_info, avx512vl_i64_info,
- SchedWriteCvtDQ2PS>, REX_W, PS,
+ SchedWriteCvtDQ2PS>, REX_W, TB, PS,
EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PS : avx512_cvtqq2ps_dq2ph<0x7A, "vcvtuqq2ps", any_uint_to_fp, uint_to_fp,
X86any_VUintToFP, X86VMUintToFP,
X86VUintToFpRnd, avx512vl_f32_info, avx512vl_i64_info,
- SchedWriteCvtDQ2PS>, REX_W, XD,
+ SchedWriteCvtDQ2PS>, REX_W, TB, XD,
EVEX_CD8<64, CD8VF>;
let Predicates = [HasVLX] in {
@@ -8912,12 +8912,12 @@ multiclass avx512_cvtph2ps<X86VectorVTInfo _dest, X86VectorVTInfo _src,
(ins _src.RC:$src), "vcvtph2ps", "$src", "$src",
(X86any_cvtph2ps (_src.VT _src.RC:$src)),
(X86cvtph2ps (_src.VT _src.RC:$src))>,
- T8PD, Sched<[sched]>;
+ T8, PD, Sched<[sched]>;
defm rm : AVX512_maskable_split<0x13, MRMSrcMem, _dest, (outs _dest.RC:$dst),
(ins x86memop:$src), "vcvtph2ps", "$src", "$src",
(X86any_cvtph2ps (_src.VT ld_dag)),
(X86cvtph2ps (_src.VT ld_dag))>,
- T8PD, Sched<[sched.Folded]>;
+ T8, PD, Sched<[sched.Folded]>;
}
multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src,
@@ -8927,7 +8927,7 @@ multiclass avx512_cvtph2ps_sae<X86VectorVTInfo _dest, X86VectorVTInfo _src,
(ins _src.RC:$src), "vcvtph2ps",
"{sae}, $src", "$src, {sae}",
(X86cvtph2psSAE (_src.VT _src.RC:$src))>,
- T8PD, EVEX_B, Sched<[sched]>;
+ T8, PD, EVEX_B, Sched<[sched]>;
}
let Predicates = [HasAVX512] in
@@ -9068,55 +9068,55 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in {
let Defs = [EFLAGS], Predicates = [HasAVX512] in {
defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86any_fcmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, PS, EVEX, VEX_LIG,
+ "ucomiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86any_fcmp, f64, f64mem, loadf64,
- "ucomisd", SSEPackedDouble>, PD, EVEX,
+ "ucomisd", SSEPackedDouble>, TB, PD, EVEX,
VEX_LIG, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCOMISSZ : sse12_ord_cmp<0x2F, FR32X, X86strict_fcmps, f32, f32mem, loadf32,
- "comiss", SSEPackedSingle>, PS, EVEX, VEX_LIG,
+ "comiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VCOMISDZ : sse12_ord_cmp<0x2F, FR64X, X86strict_fcmps, f64, f64mem, loadf64,
- "comisd", SSEPackedDouble>, PD, EVEX,
+ "comisd", SSEPackedDouble>, TB, PD, EVEX,
VEX_LIG, REX_W, EVEX_CD8<64, CD8VT1>;
let isCodeGenOnly = 1 in {
defm VUCOMISSZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v4f32, ssmem,
- sse_load_f32, "ucomiss", SSEPackedSingle>, PS, EVEX, VEX_LIG,
+ sse_load_f32, "ucomiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VUCOMISDZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v2f64, sdmem,
- sse_load_f64, "ucomisd", SSEPackedDouble>, PD, EVEX,
+ sse_load_f64, "ucomisd", SSEPackedDouble>, TB, PD, EVEX,
VEX_LIG, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCOMISSZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v4f32, ssmem,
- sse_load_f32, "comiss", SSEPackedSingle>, PS, EVEX, VEX_LIG,
+ sse_load_f32, "comiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VCOMISDZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v2f64, sdmem,
- sse_load_f64, "comisd", SSEPackedDouble>, PD, EVEX,
+ sse_load_f64, "comisd", SSEPackedDouble>, TB, PD, EVEX,
VEX_LIG, REX_W, EVEX_CD8<64, CD8VT1>;
}
}
let Defs = [EFLAGS], Predicates = [HasFP16] in {
defm VUCOMISHZ : avx512_ord_cmp_sae<0x2E, v8f16x_info, "vucomish",
- SSEPackedSingle>, AVX512PSIi8Base, T_MAP5PS,
+ SSEPackedSingle>, AVX512PSIi8Base, T_MAP5,
EVEX_CD8<16, CD8VT1>;
defm VCOMISHZ : avx512_ord_cmp_sae<0x2F, v8f16x_info, "vcomish",
- SSEPackedSingle>, AVX512PSIi8Base, T_MAP5PS,
+ SSEPackedSingle>, AVX512PSIi8Base, T_MAP5,
EVEX_CD8<16, CD8VT1>;
defm VUCOMISHZ : sse12_ord_cmp<0x2E, FR16X, X86any_fcmp, f16, f16mem, loadf16,
- "ucomish", SSEPackedSingle>, T_MAP5PS, EVEX,
+ "ucomish", SSEPackedSingle>, T_MAP5, PS, EVEX,
VEX_LIG, EVEX_CD8<16, CD8VT1>;
defm VCOMISHZ : sse12_ord_cmp<0x2F, FR16X, X86strict_fcmps, f16, f16mem, loadf16,
- "comish", SSEPackedSingle>, T_MAP5PS, EVEX,
+ "comish", SSEPackedSingle>, T_MAP5, PS, EVEX,
VEX_LIG, EVEX_CD8<16, CD8VT1>;
let isCodeGenOnly = 1 in {
defm VUCOMISHZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v8f16, shmem,
sse_load_f16, "ucomish", SSEPackedSingle>,
- T_MAP5PS, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, PS, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
defm VCOMISHZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v8f16, shmem,
sse_load_f16, "comish", SSEPackedSingle>,
- T_MAP5PS, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, PS, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
}
}
@@ -9141,23 +9141,23 @@ multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm VRCPSHZ : avx512_fp14_s<0x4D, "vrcpsh", X86rcp14s, SchedWriteFRcp.Scl,
f16x_info, HasFP16>, EVEX_CD8<16, CD8VT1>,
- T_MAP6PD;
+ T_MAP6, PD;
defm VRSQRTSHZ : avx512_fp14_s<0x4F, "vrsqrtsh", X86rsqrt14s,
SchedWriteFRsqrt.Scl, f16x_info, HasFP16>,
- EVEX_CD8<16, CD8VT1>, T_MAP6PD;
+ EVEX_CD8<16, CD8VT1>, T_MAP6, PD;
let Uses = [MXCSR] in {
defm VRCP14SSZ : avx512_fp14_s<0x4D, "vrcp14ss", X86rcp14s, SchedWriteFRcp.Scl,
f32x_info>, EVEX_CD8<32, CD8VT1>,
- T8PD;
+ T8, PD;
defm VRCP14SDZ : avx512_fp14_s<0x4D, "vrcp14sd", X86rcp14s, SchedWriteFRcp.Scl,
f64x_info>, REX_W, EVEX_CD8<64, CD8VT1>,
- T8PD;
+ T8, PD;
defm VRSQRT14SSZ : avx512_fp14_s<0x4F, "vrsqrt14ss", X86rsqrt14s,
SchedWriteFRsqrt.Scl, f32x_info>,
- EVEX_CD8<32, CD8VT1>, T8PD;
+ EVEX_CD8<32, CD8VT1>, T8, PD;
defm VRSQRT14SDZ : avx512_fp14_s<0x4F, "vrsqrt14sd", X86rsqrt14s,
SchedWriteFRsqrt.Scl, f64x_info>, REX_W,
- EVEX_CD8<64, CD8VT1>, T8PD;
+ EVEX_CD8<64, CD8VT1>, T8, PD;
}
/// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
@@ -9166,19 +9166,19 @@ multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
let ExeDomain = _.ExeDomain in {
defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src), OpcodeStr, "$src", "$src",
- (_.VT (OpNode _.RC:$src))>, EVEX, T8PD,
+ (_.VT (OpNode _.RC:$src))>, EVEX, T8, PD,
Sched<[sched]>;
defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.MemOp:$src), OpcodeStr, "$src", "$src",
(OpNode (_.VT
- (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD,
+ (bitconvert (_.LdFrag addr:$src))))>, EVEX, T8, PD,
Sched<[sched.Folded, sched.ReadAfterFold]>;
defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.ScalarMemOp:$src), OpcodeStr,
"${src}"#_.BroadcastStr, "${src}"#_.BroadcastStr,
(OpNode (_.VT
(_.BroadcastLdFrag addr:$src)))>,
- EVEX, T8PD, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
+ EVEX, T8, PD, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>;
}
}
@@ -9192,7 +9192,7 @@ multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
let Predicates = [HasFP16] in
defm PHZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ph"), OpNode, sched.ZMM,
- v32f16_info>, EVEX_V512, T_MAP6PD, EVEX_CD8<16, CD8VF>;
+ v32f16_info>, EVEX_V512, T_MAP6, EVEX_CD8<16, CD8VF>;
// Define only if AVX512VL feature is present.
let Predicates = [HasVLX], Uses = [MXCSR] in {
@@ -9212,10 +9212,10 @@ multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Predicates = [HasFP16, HasVLX] in {
defm PHZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ph"),
OpNode, sched.XMM, v8f16x_info>,
- EVEX_V128, T_MAP6PD, EVEX_CD8<16, CD8VF>;
+ EVEX_V128, T_MAP6, EVEX_CD8<16, CD8VF>;
defm PHZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ph"),
OpNode, sched.YMM, v16f16x_info>,
- EVEX_V256, T_MAP6PD, EVEX_CD8<16, CD8VF>;
+ EVEX_V256, T_MAP6, EVEX_CD8<16, CD8VF>;
}
}
@@ -9250,16 +9250,16 @@ multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
multiclass avx512_eri_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
SDNode OpNodeSAE, X86FoldableSchedWrite sched> {
defm SSZ : avx512_fp28_s<opc, OpcodeStr#"ss", f32x_info, OpNode, OpNodeSAE,
- sched>, EVEX_CD8<32, CD8VT1>, VEX_LIG, T8PD, EVEX, VVVV;
+ sched>, EVEX_CD8<32, CD8VT1>, VEX_LIG, T8, PD, EVEX, VVVV;
defm SDZ : avx512_fp28_s<opc, OpcodeStr#"sd", f64x_info, OpNode, OpNodeSAE,
- sched>, EVEX_CD8<64, CD8VT1>, VEX_LIG, REX_W, T8PD, EVEX, VVVV;
+ sched>, EVEX_CD8<64, CD8VT1>, VEX_LIG, REX_W, T8, PD, EVEX, VVVV;
}
multiclass avx512_vgetexpsh<bits<8> opc, string OpcodeStr, SDNode OpNode,
SDNode OpNodeSAE, X86FoldableSchedWrite sched> {
let Predicates = [HasFP16] in
defm SHZ : avx512_fp28_s<opc, OpcodeStr#"sh", f16x_info, OpNode, OpNodeSAE, sched>,
- EVEX_CD8<16, CD8VT1>, T_MAP6PD, EVEX, VVVV;
+ EVEX_CD8<16, CD8VT1>, T_MAP6, PD, EVEX, VVVV;
}
let Predicates = [HasERI] in {
@@ -9311,10 +9311,10 @@ multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode,
SDNode OpNodeSAE, X86SchedWriteWidths sched> {
defm PSZ : avx512_fp28_p<opc, OpcodeStr#"ps", v16f32_info, OpNode, sched.ZMM>,
avx512_fp28_p_sae<opc, OpcodeStr#"ps", v16f32_info, OpNodeSAE, sched.ZMM>,
- T8PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
+ T8, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp28_p<opc, OpcodeStr#"pd", v8f64_info, OpNode, sched.ZMM>,
avx512_fp28_p_sae<opc, OpcodeStr#"pd", v8f64_info, OpNodeSAE, sched.ZMM>,
- T8PD, EVEX_V512, REX_W, EVEX_CD8<64, CD8VF>;
+ T8, PD, EVEX_V512, REX_W, EVEX_CD8<64, CD8VF>;
}
multiclass avx512_fp_unaryop_packed<bits<8> opc, string OpcodeStr,
@@ -9323,16 +9323,16 @@ multiclass avx512_fp_unaryop_packed<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX] in {
defm PSZ128 : avx512_fp28_p<opc, OpcodeStr#"ps", v4f32x_info, OpNode,
sched.XMM>,
- EVEX_V128, T8PD, EVEX_CD8<32, CD8VF>;
+ EVEX_V128, T8, PD, EVEX_CD8<32, CD8VF>;
defm PSZ256 : avx512_fp28_p<opc, OpcodeStr#"ps", v8f32x_info, OpNode,
sched.YMM>,
- EVEX_V256, T8PD, EVEX_CD8<32, CD8VF>;
+ EVEX_V256, T8, PD, EVEX_CD8<32, CD8VF>;
defm PDZ128 : avx512_fp28_p<opc, OpcodeStr#"pd", v2f64x_info, OpNode,
sched.XMM>,
- EVEX_V128, REX_W, T8PD, EVEX_CD8<64, CD8VF>;
+ EVEX_V128, REX_W, T8, PD, EVEX_CD8<64, CD8VF>;
defm PDZ256 : avx512_fp28_p<opc, OpcodeStr#"pd", v4f64x_info, OpNode,
sched.YMM>,
- EVEX_V256, REX_W, T8PD, EVEX_CD8<64, CD8VF>;
+ EVEX_V256, REX_W, T8, PD, EVEX_CD8<64, CD8VF>;
}
}
@@ -9341,12 +9341,12 @@ multiclass avx512_vgetexp_fp16<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Predicates = [HasFP16] in
defm PHZ : avx512_fp28_p<opc, OpcodeStr#"ph", v32f16_info, OpNode, sched.ZMM>,
avx512_fp28_p_sae<opc, OpcodeStr#"ph", v32f16_info, OpNodeSAE, sched.ZMM>,
- T_MAP6PD, EVEX_V512, EVEX_CD8<16, CD8VF>;
+ T_MAP6, PD, EVEX_V512, EVEX_CD8<16, CD8VF>;
let Predicates = [HasFP16, HasVLX] in {
defm PHZ128 : avx512_fp28_p<opc, OpcodeStr#"ph", v8f16x_info, OpNode, sched.XMM>,
- EVEX_V128, T_MAP6PD, EVEX_CD8<16, CD8VF>;
+ EVEX_V128, T_MAP6, PD, EVEX_CD8<16, CD8VF>;
defm PHZ256 : avx512_fp28_p<opc, OpcodeStr#"ph", v16f16x_info, OpNode, sched.YMM>,
- EVEX_V256, T_MAP6PD, EVEX_CD8<16, CD8VF>;
+ EVEX_V256, T_MAP6, PD, EVEX_CD8<16, CD8VF>;
}
}
let Predicates = [HasERI] in {
@@ -9401,35 +9401,35 @@ multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
let Predicates = [HasFP16] in
defm PHZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.ZMM, v32f16_info>,
- EVEX_V512, T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
let Predicates = [HasFP16, HasVLX] in {
defm PHZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.XMM, v8f16x_info>,
- EVEX_V128, T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V128, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
defm PHZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.YMM, v16f16x_info>,
- EVEX_V256, T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V256, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
}
defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.ZMM, v16f32_info>,
- EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
sched.PD.ZMM, v8f64_info>,
- EVEX_V512, REX_W, PD, EVEX_CD8<64, CD8VF>;
+ EVEX_V512, REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
// Define only if AVX512VL feature is present.
let Predicates = [HasVLX] in {
defm PSZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.XMM, v4f32x_info>,
- EVEX_V128, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V128, TB, PS, EVEX_CD8<32, CD8VF>;
defm PSZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.YMM, v8f32x_info>,
- EVEX_V256, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V256, TB, PS, EVEX_CD8<32, CD8VF>;
defm PDZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
sched.PD.XMM, v2f64x_info>,
- EVEX_V128, REX_W, PD, EVEX_CD8<64, CD8VF>;
+ EVEX_V128, REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
defm PDZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
sched.PD.YMM, v4f64x_info>,
- EVEX_V256, REX_W, PD, EVEX_CD8<64, CD8VF>;
+ EVEX_V256, REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
}
}
@@ -9439,13 +9439,13 @@ multiclass avx512_sqrt_packed_all_round<bits<8> opc, string OpcodeStr,
let Predicates = [HasFP16] in
defm PHZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.ZMM, v32f16_info>,
- EVEX_V512, T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
defm PSZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.ZMM, v16f32_info>,
- EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "pd"),
sched.PD.ZMM, v8f64_info>,
- EVEX_V512, REX_W, PD, EVEX_CD8<64, CD8VF>;
+ EVEX_V512, REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
}
multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr, X86FoldableSchedWrite sched,
@@ -9501,11 +9501,11 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr, X86FoldableSchedWri
multiclass avx512_sqrt_scalar_all<bits<8> opc, string OpcodeStr,
X86SchedWriteSizes sched> {
defm SHZ : avx512_sqrt_scalar<opc, OpcodeStr#"sh", sched.PH.Scl, f16x_info, NAME#"SH", HasFP16>,
- EVEX_CD8<16, CD8VT1>, EVEX, VVVV, T_MAP5XS;
+ EVEX_CD8<16, CD8VT1>, EVEX, VVVV, T_MAP5, XS;
defm SSZ : avx512_sqrt_scalar<opc, OpcodeStr#"ss", sched.PS.Scl, f32x_info, NAME#"SS">,
- EVEX_CD8<32, CD8VT1>, EVEX, VVVV, XS;
+ EVEX_CD8<32, CD8VT1>, EVEX, VVVV, TB, XS;
defm SDZ : avx512_sqrt_scalar<opc, OpcodeStr#"sd", sched.PD.Scl, f64x_info, NAME#"SD">,
- EVEX_CD8<64, CD8VT1>, EVEX, VVVV, XD, REX_W;
+ EVEX_CD8<64, CD8VT1>, EVEX, VVVV, TB, XD, REX_W;
}
defm VSQRT : avx512_sqrt_packed_all<0x51, "vsqrt", SchedWriteFSqrtSizes>,
@@ -9923,16 +9923,16 @@ multiclass avx512_pmovx_bw<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasBWI] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v8i16x_info,
v16i8x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VH>, T8PD, EVEX_V128, WIG;
+ EVEX_CD8<8, CD8VH>, T8, PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v16i16x_info,
v16i8x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<8, CD8VH>, T8PD, EVEX_V256, WIG;
+ EVEX_CD8<8, CD8VH>, T8, PD, EVEX_V256, WIG;
}
let Predicates = [HasBWI] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v32i16_info,
v32i8x_info, i256mem, LdFrag, OpNode>,
- EVEX_CD8<8, CD8VH>, T8PD, EVEX_V512, WIG;
+ EVEX_CD8<8, CD8VH>, T8, PD, EVEX_V512, WIG;
}
}
@@ -9943,16 +9943,16 @@ multiclass avx512_pmovx_bd<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v4i32x_info,
v16i8x_info, i32mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V128, WIG;
+ EVEX_CD8<8, CD8VQ>, T8, PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v8i32x_info,
v16i8x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V256, WIG;
+ EVEX_CD8<8, CD8VQ>, T8, PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v16i32_info,
v16i8x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V512, WIG;
+ EVEX_CD8<8, CD8VQ>, T8, PD, EVEX_V512, WIG;
}
}
@@ -9963,16 +9963,16 @@ multiclass avx512_pmovx_bq<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v2i64x_info,
v16i8x_info, i16mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VO>, T8PD, EVEX_V128, WIG;
+ EVEX_CD8<8, CD8VO>, T8, PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v4i64x_info,
v16i8x_info, i32mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VO>, T8PD, EVEX_V256, WIG;
+ EVEX_CD8<8, CD8VO>, T8, PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v8i64_info,
v16i8x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VO>, T8PD, EVEX_V512, WIG;
+ EVEX_CD8<8, CD8VO>, T8, PD, EVEX_V512, WIG;
}
}
@@ -9983,16 +9983,16 @@ multiclass avx512_pmovx_wd<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v4i32x_info,
v8i16x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<16, CD8VH>, T8PD, EVEX_V128, WIG;
+ EVEX_CD8<16, CD8VH>, T8, PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v8i32x_info,
v8i16x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<16, CD8VH>, T8PD, EVEX_V256, WIG;
+ EVEX_CD8<16, CD8VH>, T8, PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v16i32_info,
v16i16x_info, i256mem, LdFrag, OpNode>,
- EVEX_CD8<16, CD8VH>, T8PD, EVEX_V512, WIG;
+ EVEX_CD8<16, CD8VH>, T8, PD, EVEX_V512, WIG;
}
}
@@ -10003,16 +10003,16 @@ multiclass avx512_pmovx_wq<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v2i64x_info,
v8i16x_info, i32mem, LdFrag, InVecNode>,
- EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V128, WIG;
+ EVEX_CD8<16, CD8VQ>, T8, PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v4i64x_info,
v8i16x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V256, WIG;
+ EVEX_CD8<16, CD8VQ>, T8, PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v8i64_info,
v8i16x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V512, WIG;
+ EVEX_CD8<16, CD8VQ>, T8, PD, EVEX_V512, WIG;
}
}
@@ -10024,16 +10024,16 @@ multiclass avx512_pmovx_dq<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v2i64x_info,
v4i32x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<32, CD8VH>, T8PD, EVEX_V128;
+ EVEX_CD8<32, CD8VH>, T8, PD, EVEX_V128;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v4i64x_info,
v4i32x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<32, CD8VH>, T8PD, EVEX_V256;
+ EVEX_CD8<32, CD8VH>, T8, PD, EVEX_V256;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v8i64_info,
v8i32x_info, i256mem, LdFrag, OpNode>,
- EVEX_CD8<32, CD8VH>, T8PD, EVEX_V512;
+ EVEX_CD8<32, CD8VH>, T8, PD, EVEX_V512;
}
}
@@ -11258,7 +11258,7 @@ defm : avx512_unary_lowering<"VPOPCNTD", ctpop, avx512vl_i32_info, HasVPOPCNTDQ>
multiclass avx512_replicate<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86SchedWriteWidths sched> {
defm NAME: avx512_unary_rm_vl<opc, OpcodeStr, OpNode, sched,
- avx512vl_f32_info, HasAVX512>, XS;
+ avx512vl_f32_info, HasAVX512>, TB, XS;
}
defm VMOVSHDUP : avx512_replicate<0x16, "vmovshdup", X86Movshdup,
@@ -11301,7 +11301,7 @@ multiclass avx512_movddup_common<bits<8> opc, string OpcodeStr,
multiclass avx512_movddup<bits<8> opc, string OpcodeStr,
X86SchedWriteWidths sched> {
defm NAME: avx512_movddup_common<opc, OpcodeStr, sched,
- avx512vl_f64_info>, XD, REX_W;
+ avx512vl_f64_info>, TB, XD, REX_W;
}
defm VMOVDDUP : avx512_movddup<0x12, "vmovddup", SchedWriteFShuffle>;
@@ -11369,9 +11369,9 @@ multiclass avx512_extract_elt_b<string OpcodeStr, X86VectorVTInfo _> {
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst,
(X86pextrb (_.VT _.RC:$src1), timm:$src2))]>,
- EVEX, TAPD, Sched<[WriteVecExtract]>;
+ EVEX, TA, PD, Sched<[WriteVecExtract]>;
- defm NAME : avx512_extract_elt_bw_m<0x14, OpcodeStr, X86pextrb, _>, TAPD;
+ defm NAME : avx512_extract_elt_bw_m<0x14, OpcodeStr, X86pextrb, _>, TA, PD;
}
}
@@ -11382,15 +11382,15 @@ multiclass avx512_extract_elt_w<string OpcodeStr, X86VectorVTInfo _> {
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst,
(X86pextrw (_.VT _.RC:$src1), timm:$src2))]>,
- EVEX, PD, Sched<[WriteVecExtract]>;
+ EVEX, TB, PD, Sched<[WriteVecExtract]>;
let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in
def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
(ins _.RC:$src1, u8imm:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- EVEX, TAPD, Sched<[WriteVecExtract]>;
+ EVEX, TA, PD, Sched<[WriteVecExtract]>;
- defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TAPD;
+ defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TA, PD;
}
}
@@ -11402,14 +11402,14 @@ multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GRC:$dst,
(extractelt (_.VT _.RC:$src1), imm:$src2))]>,
- EVEX, TAPD, Sched<[WriteVecExtract]>;
+ EVEX, TA, PD, Sched<[WriteVecExtract]>;
def mr : AVX512Ii8<0x16, MRMDestMem, (outs),
(ins _.ScalarMemOp:$dst, _.RC:$src1, u8imm:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(store (extractelt (_.VT _.RC:$src1),
imm:$src2),addr:$dst)]>,
- EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TAPD,
+ EVEX, EVEX_CD8<_.EltSize, CD8VT1>, TA, PD,
Sched<[WriteVecExtractSt]>;
}
}
@@ -11452,17 +11452,17 @@ multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set _.RC:$dst,
(_.VT (insertelt _.RC:$src1, GRC:$src2, imm:$src3)))]>,
- EVEX, VVVV, TAPD, Sched<[WriteVecInsert]>;
+ EVEX, VVVV, TA, PD, Sched<[WriteVecInsert]>;
defm NAME : avx512_insert_elt_m<opc, OpcodeStr, insertelt, _,
- _.ScalarLdFrag, imm>, TAPD;
+ _.ScalarLdFrag, imm>, TA, PD;
}
}
defm VPINSRBZ : avx512_insert_elt_bw<0x20, "vpinsrb", X86pinsrb, v16i8x_info,
- extloadi8>, TAPD, WIG;
+ extloadi8>, TA, PD, WIG;
defm VPINSRWZ : avx512_insert_elt_bw<0xC4, "vpinsrw", X86pinsrw, v8i16x_info,
- extloadi16>, PD, WIG;
+ extloadi16>, TB, PD, WIG;
defm VPINSRDZ : avx512_insert_elt_dq<0x22, "vpinsrd", v4i32x_info, GR32>;
defm VPINSRQZ : avx512_insert_elt_dq<0x22, "vpinsrq", v2i64x_info, GR64>, REX_W;
@@ -11504,8 +11504,8 @@ multiclass avx512_shufp<string OpcodeStr, AVX512VLVectorVTInfo VTInfo_FP>{
AVX512AIi8Base, EVEX, VVVV;
}
-defm VSHUFPS: avx512_shufp<"vshufps", avx512vl_f32_info>, PS;
-defm VSHUFPD: avx512_shufp<"vshufpd", avx512vl_f64_info>, PD, REX_W;
+defm VSHUFPS: avx512_shufp<"vshufps", avx512vl_f32_info>, TB, PS;
+defm VSHUFPD: avx512_shufp<"vshufpd", avx512vl_f64_info>, TB, REX_W;
//===----------------------------------------------------------------------===//
// AVX-512 - Byte shift Left/Right
@@ -12217,13 +12217,13 @@ multiclass VBMI2_shift_var_rm<bits<8> Op, string OpStr, SDNode OpNode,
(ins VTI.RC:$src2, VTI.RC:$src3), OpStr,
"$src3, $src2", "$src2, $src3",
(VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2, VTI.RC:$src3))>,
- T8PD, EVEX, VVVV, Sched<[sched]>;
+ T8, PD, EVEX, VVVV, Sched<[sched]>;
defm m: AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
(ins VTI.RC:$src2, VTI.MemOp:$src3), OpStr,
"$src3, $src2", "$src2, $src3",
(VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2,
(VTI.VT (VTI.LdFrag addr:$src3))))>,
- T8PD, EVEX, VVVV,
+ T8, PD, EVEX, VVVV,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
}
@@ -12239,7 +12239,7 @@ multiclass VBMI2_shift_var_rmb<bits<8> Op, string OpStr, SDNode OpNode,
"$src2, ${src3}"#VTI.BroadcastStr,
(OpNode VTI.RC:$src1, VTI.RC:$src2,
(VTI.VT (VTI.BroadcastLdFrag addr:$src3)))>,
- T8PD, EVEX, VVVV, EVEX_B,
+ T8, PD, EVEX, VVVV, EVEX_B,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
@@ -12321,13 +12321,13 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode,
(VTI.VT (OpNode VTI.RC:$src1,
VTI.RC:$src2, VTI.RC:$src3)),
IsCommutable, IsCommutable>,
- EVEX, VVVV, T8PD, Sched<[sched]>;
+ EVEX, VVVV, T8, PD, Sched<[sched]>;
defm m : AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
(ins VTI.RC:$src2, VTI.MemOp:$src3), OpStr,
"$src3, $src2", "$src2, $src3",
(VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2,
(VTI.VT (VTI.LdFrag addr:$src3))))>,
- EVEX, VVVV, EVEX_CD8<32, CD8VF>, T8PD,
+ EVEX, VVVV, EVEX_CD8<32, CD8VF>, T8, PD,
Sched<[sched.Folded, sched.ReadAfterFold,
sched.ReadAfterFold]>;
defm mb : AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
@@ -12337,7 +12337,7 @@ multiclass VNNI_rmb<bits<8> Op, string OpStr, SDNode OpNode,
(OpNode VTI.RC:$src1, VTI.RC:$src2,
(VTI.VT (VTI.BroadcastLdFrag addr:$src3)))>,
EVEX, VVVV, EVEX_CD8<32, CD8VF>, EVEX_B,
- T8PD, Sched<[sched.Folded, sched.ReadAfterFold,
+ T8, PD, Sched<[sched.Folded, sched.ReadAfterFold,
sched.ReadAfterFold]>;
}
}
@@ -12406,7 +12406,7 @@ multiclass VPSHUFBITQMB_rm<X86FoldableSchedWrite sched, X86VectorVTInfo VTI> {
(X86Vpshufbitqmb (VTI.VT VTI.RC:$src1),
(VTI.VT VTI.RC:$src2)),
(X86Vpshufbitqmb_su (VTI.VT VTI.RC:$src1),
- (VTI.VT VTI.RC:$src2))>, EVEX, VVVV, T8PD,
+ (VTI.VT VTI.RC:$src2))>, EVEX, VVVV, T8, PD,
Sched<[sched]>;
defm rm : AVX512_maskable_cmp<0x8F, MRMSrcMem, VTI, (outs VTI.KRC:$dst),
(ins VTI.RC:$src1, VTI.MemOp:$src2),
@@ -12416,7 +12416,7 @@ multiclass VPSHUFBITQMB_rm<X86FoldableSchedWrite sched, X86VectorVTInfo VTI> {
(VTI.VT (VTI.LdFrag addr:$src2))),
(X86Vpshufbitqmb_su (VTI.VT VTI.RC:$src1),
(VTI.VT (VTI.LdFrag addr:$src2)))>,
- EVEX, VVVV, EVEX_CD8<8, CD8VF>, T8PD,
+ EVEX, VVVV, EVEX_CD8<8, CD8VF>, T8, PD,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
@@ -12451,7 +12451,7 @@ multiclass GF2P8MULB_avx512_common<bits<8> Op, string OpStr, SDNode OpNode,
defm VGF2P8MULB : GF2P8MULB_avx512_common<0xCF, "vgf2p8mulb", X86GF2P8mulb,
SchedWriteVecALU>,
- EVEX_CD8<8, CD8VF>, T8PD;
+ EVEX_CD8<8, CD8VF>, T8;
multiclass GF2P8AFFINE_avx512_rmb_imm<bits<8> Op, string OpStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo VTI,
@@ -12498,25 +12498,25 @@ let hasSideEffects = 0, mayLoad = 1, ExeDomain = SSEPackedSingle,
defm V4FMADDPSrm : AVX512_maskable_3src_in_asm<0x9A, MRMSrcMem, v16f32_info,
(outs VR512:$dst), (ins VR512:$src2, f128mem:$src3),
"v4fmaddps", "$src3, $src2", "$src2, $src3",
- []>, EVEX_V512, EVEX, VVVV, T8XD, EVEX_CD8<32, CD8VQ>,
+ []>, EVEX_V512, EVEX, VVVV, T8, XD, EVEX_CD8<32, CD8VQ>,
Sched<[SchedWriteFMA.ZMM.Folded]>;
defm V4FNMADDPSrm : AVX512_maskable_3src_in_asm<0xAA, MRMSrcMem, v16f32_info,
(outs VR512:$dst), (ins VR512:$src2, f128mem:$src3),
"v4fnmaddps", "$src3, $src2", "$src2, $src3",
- []>, EVEX_V512, EVEX, VVVV, T8XD, EVEX_CD8<32, CD8VQ>,
+ []>, EVEX_V512, EVEX, VVVV, T8, XD, EVEX_CD8<32, CD8VQ>,
Sched<[SchedWriteFMA.ZMM.Folded]>;
defm V4FMADDSSrm : AVX512_maskable_3src_in_asm<0x9B, MRMSrcMem, f32x_info,
(outs VR128X:$dst), (ins VR128X:$src2, f128mem:$src3),
"v4fmaddss", "$src3, $src2", "$src2, $src3",
- []>, VEX_LIG, EVEX, VVVV, T8XD, EVEX_CD8<32, CD8VF>,
+ []>, VEX_LIG, EVEX, VVVV, T8, XD, EVEX_CD8<32, CD8VF>,
Sched<[SchedWriteFMA.Scl.Folded]>;
defm V4FNMADDSSrm : AVX512_maskable_3src_in_asm<0xAB, MRMSrcMem, f32x_info,
(outs VR128X:$dst), (ins VR128X:$src2, f128mem:$src3),
"v4fnmaddss", "$src3, $src2", "$src2, $src3",
- []>, VEX_LIG, EVEX, VVVV, T8XD, EVEX_CD8<32, CD8VF>,
+ []>, VEX_LIG, EVEX, VVVV, T8, XD, EVEX_CD8<32, CD8VF>,
Sched<[SchedWriteFMA.Scl.Folded]>;
}
@@ -12529,13 +12529,13 @@ let hasSideEffects = 0, mayLoad = 1, ExeDomain = SSEPackedInt,
defm VP4DPWSSDrm : AVX512_maskable_3src_in_asm<0x52, MRMSrcMem, v16i32_info,
(outs VR512:$dst), (ins VR512:$src2, f128mem:$src3),
"vp4dpwssd", "$src3, $src2", "$src2, $src3",
- []>, EVEX_V512, EVEX, VVVV, T8XD, EVEX_CD8<32, CD8VQ>,
+ []>, EVEX_V512, EVEX, VVVV, T8, XD, EVEX_CD8<32, CD8VQ>,
Sched<[SchedWriteFMA.ZMM.Folded]>;
defm VP4DPWSSDSrm : AVX512_maskable_3src_in_asm<0x53, MRMSrcMem, v16i32_info,
(outs VR512:$dst), (ins VR512:$src2, f128mem:$src3),
"vp4dpwssds", "$src3, $src2", "$src2, $src3",
- []>, EVEX_V512, EVEX, VVVV, T8XD, EVEX_CD8<32, CD8VQ>,
+ []>, EVEX_V512, EVEX, VVVV, T8, XD, EVEX_CD8<32, CD8VQ>,
Sched<[SchedWriteFMA.ZMM.Folded]>;
}
@@ -12558,7 +12558,7 @@ multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInf
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.KRPC:$dst, (X86vp2intersect
_.RC:$src1, (_.VT _.RC:$src2)))]>,
- EVEX, VVVV, T8XD, Sched<[sched]>;
+ EVEX, VVVV, T8, XD, Sched<[sched]>;
def rm : I<0x68, MRMSrcMem,
(outs _.KRPC:$dst),
@@ -12567,7 +12567,7 @@ multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInf
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.KRPC:$dst, (X86vp2intersect
_.RC:$src1, (_.VT (bitconvert (_.LdFrag addr:$src2)))))]>,
- EVEX, VVVV, T8XD, EVEX_CD8<_.EltSize, CD8VF>,
+ EVEX, VVVV, T8, XD, EVEX_CD8<_.EltSize, CD8VF>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
def rmb : I<0x68, MRMSrcMem,
@@ -12577,7 +12577,7 @@ multiclass avx512_vp2intersect_modes<X86FoldableSchedWrite sched, X86VectorVTInf
", $src1, $dst|$dst, $src1, ${src2}", _.BroadcastStr ,"}"),
[(set _.KRPC:$dst, (X86vp2intersect
_.RC:$src1, (_.VT (_.BroadcastLdFrag addr:$src2))))]>,
- EVEX, VVVV, T8XD, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
+ EVEX, VVVV, T8, XD, EVEX_B, EVEX_CD8<_.EltSize, CD8VF>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
@@ -12623,7 +12623,7 @@ let ExeDomain = SSEPackedSingle in
defm VCVTNE2PS2BF16 : avx512_binop_all2<0x72, "vcvtne2ps2bf16",
SchedWriteCvtPD2PS, //FIXME: Should be SchedWriteCvtPS2BF
avx512vl_f32_info, avx512vl_bf16_info,
- X86cvtne2ps2bf16, HasBF16, 0>, T8XD;
+ X86cvtne2ps2bf16, HasBF16, 0>, T8, XD;
// Truncate Float to BFloat16
multiclass avx512_cvtps2bf16<bits<8> opc, string OpcodeStr,
@@ -12660,7 +12660,7 @@ multiclass avx512_cvtps2bf16<bits<8> opc, string OpcodeStr,
}
defm VCVTNEPS2BF16 : avx512_cvtps2bf16<0x72, "vcvtneps2bf16",
- SchedWriteCvtPD2PS>, T8XS,
+ SchedWriteCvtPD2PS>, T8, XS,
EVEX_CD8<32, CD8VF>;
let Predicates = [HasBF16, HasVLX] in {
@@ -12783,7 +12783,7 @@ multiclass avx512_dpbf16ps_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
let ExeDomain = SSEPackedSingle in
defm VDPBF16PS : avx512_dpbf16ps_sizes<0x52, "vdpbf16ps", X86dpbf16ps, SchedWriteFMA,
avx512vl_f32_info, avx512vl_bf16_info,
- HasBF16>, T8XS, EVEX_CD8<32, CD8VF>;
+ HasBF16>, T8, XS, EVEX_CD8<32, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX512FP16
@@ -12792,12 +12792,12 @@ defm VDPBF16PS : avx512_dpbf16ps_sizes<0x52, "vdpbf16ps", X86dpbf16ps, SchedWrit
let Predicates = [HasFP16] in {
// Move word ( r/m16) to Packed word
def VMOVW2SHrr : AVX512<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
- "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5PD, EVEX, Sched<[WriteVecMoveFromGpr]>;
+ "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5, PD, EVEX, Sched<[WriteVecMoveFromGpr]>;
def VMOVWrm : AVX512<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i16mem:$src),
"vmovw\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v8i16 (scalar_to_vector (loadi16 addr:$src))))]>,
- T_MAP5PD, EVEX, EVEX_CD8<16, CD8VT1>, Sched<[WriteFLoad]>;
+ T_MAP5, PD, EVEX, EVEX_CD8<16, CD8VT1>, Sched<[WriteFLoad]>;
def : Pat<(f16 (bitconvert GR16:$src)),
(f16 (COPY_TO_REGCLASS
@@ -12854,13 +12854,13 @@ def : Pat<(v16i32 (X86vzmovl
// Move word from xmm register to r/m16
def VMOVSH2Wrr : AVX512<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
- "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5PD, EVEX, Sched<[WriteVecMoveToGpr]>;
+ "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5, PD, EVEX, Sched<[WriteVecMoveToGpr]>;
def VMOVWmr : AVX512<0x7E, MRMDestMem, (outs),
(ins i16mem:$dst, VR128X:$src),
"vmovw\t{$src, $dst|$dst, $src}",
[(store (i16 (extractelt (v8i16 VR128X:$src),
(iPTR 0))), addr:$dst)]>,
- T_MAP5PD, EVEX, EVEX_CD8<16, CD8VT1>, Sched<[WriteFStore]>;
+ T_MAP5, PD, EVEX, EVEX_CD8<16, CD8VT1>, Sched<[WriteFStore]>;
def : Pat<(i16 (bitconvert FR16X:$src)),
(i16 (EXTRACT_SUBREG
@@ -12872,9 +12872,9 @@ def : Pat<(i16 (extractelt (v8i16 VR128X:$src), (iPTR 0))),
// Allow "vmovw" to use GR64
let hasSideEffects = 0 in {
def VMOVW64toSHrr : AVX512<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
- "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5PD, EVEX, REX_W, Sched<[WriteVecMoveFromGpr]>;
+ "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5, PD, EVEX, REX_W, Sched<[WriteVecMoveFromGpr]>;
def VMOVSHtoW64rr : AVX512<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
- "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5PD, EVEX, REX_W, Sched<[WriteVecMoveToGpr]>;
+ "vmovw\t{$src, $dst|$dst, $src}", []>, T_MAP5, PD, EVEX, REX_W, Sched<[WriteVecMoveToGpr]>;
}
}
@@ -12920,27 +12920,27 @@ multiclass avx512_cvttph2w<bits<8> opc, string OpcodeStr, SDPatternOperator OpNo
defm VCVTPH2UW : avx512_cvtph2w<0x7D, "vcvtph2uw", X86cvtp2UInt, X86cvtp2UInt,
X86cvtp2UIntRnd, avx512vl_i16_info,
avx512vl_f16_info, SchedWriteCvtPD2DQ>,
- T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ T_MAP5, PS, EVEX_CD8<16, CD8VF>;
defm VCVTUW2PH : avx512_cvtph2w<0x7D, "vcvtuw2ph", any_uint_to_fp, uint_to_fp,
X86VUintToFpRnd, avx512vl_f16_info,
avx512vl_i16_info, SchedWriteCvtPD2DQ>,
- T_MAP5XD, EVEX_CD8<16, CD8VF>;
+ T_MAP5, XD, EVEX_CD8<16, CD8VF>;
defm VCVTTPH2W : avx512_cvttph2w<0x7C, "vcvttph2w", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
avx512vl_i16_info, avx512vl_f16_info,
- SchedWriteCvtPD2DQ>, T_MAP5PD, EVEX_CD8<16, CD8VF>;
+ SchedWriteCvtPD2DQ>, T_MAP5, PD, EVEX_CD8<16, CD8VF>;
defm VCVTTPH2UW : avx512_cvttph2w<0x7C, "vcvttph2uw", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
avx512vl_i16_info, avx512vl_f16_info,
- SchedWriteCvtPD2DQ>, T_MAP5PS, EVEX_CD8<16, CD8VF>;
+ SchedWriteCvtPD2DQ>, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
defm VCVTPH2W : avx512_cvtph2w<0x7D, "vcvtph2w", X86cvtp2Int, X86cvtp2Int,
X86cvtp2IntRnd, avx512vl_i16_info,
avx512vl_f16_info, SchedWriteCvtPD2DQ>,
- T_MAP5PD, EVEX_CD8<16, CD8VF>;
+ T_MAP5, PD, EVEX_CD8<16, CD8VF>;
defm VCVTW2PH : avx512_cvtph2w<0x7D, "vcvtw2ph", any_sint_to_fp, sint_to_fp,
X86VSintToFpRnd, avx512vl_f16_info,
avx512vl_i16_info, SchedWriteCvtPD2DQ>,
- T_MAP5XS, EVEX_CD8<16, CD8VF>;
+ T_MAP5, XS, EVEX_CD8<16, CD8VF>;
// Convert Half to Signed/Unsigned Doubleword
multiclass avx512_cvtph2dq<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
@@ -12980,20 +12980,20 @@ multiclass avx512_cvttph2dq<bits<8> opc, string OpcodeStr, SDPatternOperator OpN
defm VCVTPH2DQ : avx512_cvtph2dq<0x5B, "vcvtph2dq", X86cvtp2Int, X86cvtp2Int,
- X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, T_MAP5PD,
+ X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, T_MAP5, PD,
EVEX_CD8<16, CD8VH>;
defm VCVTPH2UDQ : avx512_cvtph2dq<0x79, "vcvtph2udq", X86cvtp2UInt, X86cvtp2UInt,
- X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, T_MAP5PS,
+ X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, T_MAP5, PS,
EVEX_CD8<16, CD8VH>;
defm VCVTTPH2DQ : avx512_cvttph2dq<0x5B, "vcvttph2dq", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
- SchedWriteCvtPS2DQ>, T_MAP5XS,
+ SchedWriteCvtPS2DQ>, T_MAP5, XS,
EVEX_CD8<16, CD8VH>;
defm VCVTTPH2UDQ : avx512_cvttph2dq<0x78, "vcvttph2udq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
- SchedWriteCvtPS2DQ>, T_MAP5PS,
+ SchedWriteCvtPS2DQ>, T_MAP5, PS,
EVEX_CD8<16, CD8VH>;
// Convert Half to Signed/Unsigned Quardword
@@ -13043,21 +13043,21 @@ multiclass avx512_cvttph2qq<bits<8> opc, string OpcodeStr, SDPatternOperator OpN
}
defm VCVTPH2QQ : avx512_cvtph2qq<0x7B, "vcvtph2qq", X86cvtp2Int, X86cvtp2Int,
- X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, T_MAP5PD,
+ X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, T_MAP5, PD,
EVEX_CD8<16, CD8VQ>;
defm VCVTPH2UQQ : avx512_cvtph2qq<0x79, "vcvtph2uqq", X86cvtp2UInt, X86cvtp2UInt,
- X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, T_MAP5PD,
+ X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, T_MAP5, PD,
EVEX_CD8<16, CD8VQ>;
defm VCVTTPH2QQ : avx512_cvttph2qq<0x7A, "vcvttph2qq", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
- SchedWriteCvtPS2DQ>, T_MAP5PD,
+ SchedWriteCvtPS2DQ>, T_MAP5, PD,
EVEX_CD8<16, CD8VQ>;
defm VCVTTPH2UQQ : avx512_cvttph2qq<0x78, "vcvttph2uqq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
- SchedWriteCvtPS2DQ>, T_MAP5PD,
+ SchedWriteCvtPS2DQ>, T_MAP5, PD,
EVEX_CD8<16, CD8VQ>;
// Convert Signed/Unsigned Quardword to Half
@@ -13154,53 +13154,53 @@ multiclass avx512_cvtqq2ph<bits<8> opc, string OpcodeStr, SDPatternOperator OpNo
}
defm VCVTQQ2PH : avx512_cvtqq2ph<0x5B, "vcvtqq2ph", any_sint_to_fp, sint_to_fp,
- X86VSintToFpRnd, SchedWriteCvtDQ2PS>, REX_W, T_MAP5PS,
+ X86VSintToFpRnd, SchedWriteCvtDQ2PS>, REX_W, T_MAP5, PS,
EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PH : avx512_cvtqq2ph<0x7A, "vcvtuqq2ph", any_uint_to_fp, uint_to_fp,
- X86VUintToFpRnd, SchedWriteCvtDQ2PS>, REX_W, T_MAP5XD,
+ X86VUintToFpRnd, SchedWriteCvtDQ2PS>, REX_W, T_MAP5, XD,
EVEX_CD8<64, CD8VF>;
// Convert half to signed/unsigned int 32/64
defm VCVTSH2SIZ: avx512_cvt_s_int_round<0x2D, f16x_info, i32x_info, X86cvts2si,
X86cvts2siRnd, WriteCvtSS2I, "cvtsh2si", "{l}", HasFP16>,
- T_MAP5XS, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, XS, EVEX_CD8<16, CD8VT1>;
defm VCVTSH2SI64Z: avx512_cvt_s_int_round<0x2D, f16x_info, i64x_info, X86cvts2si,
X86cvts2siRnd, WriteCvtSS2I, "cvtsh2si", "{q}", HasFP16>,
- T_MAP5XS, REX_W, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, XS, REX_W, EVEX_CD8<16, CD8VT1>;
defm VCVTSH2USIZ: avx512_cvt_s_int_round<0x79, f16x_info, i32x_info, X86cvts2usi,
X86cvts2usiRnd, WriteCvtSS2I, "cvtsh2usi", "{l}", HasFP16>,
- T_MAP5XS, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, XS, EVEX_CD8<16, CD8VT1>;
defm VCVTSH2USI64Z: avx512_cvt_s_int_round<0x79, f16x_info, i64x_info, X86cvts2usi,
X86cvts2usiRnd, WriteCvtSS2I, "cvtsh2usi", "{q}", HasFP16>,
- T_MAP5XS, REX_W, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, XS, REX_W, EVEX_CD8<16, CD8VT1>;
defm VCVTTSH2SIZ: avx512_cvt_s_all<0x2C, "vcvttsh2si", f16x_info, i32x_info,
any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSS2I,
- "{l}", HasFP16>, T_MAP5XS, EVEX_CD8<16, CD8VT1>;
+ "{l}", HasFP16>, T_MAP5, XS, EVEX_CD8<16, CD8VT1>;
defm VCVTTSH2SI64Z: avx512_cvt_s_all<0x2C, "vcvttsh2si", f16x_info, i64x_info,
any_fp_to_sint, X86cvtts2Int, X86cvtts2IntSAE, WriteCvtSS2I,
- "{q}", HasFP16>, REX_W, T_MAP5XS, EVEX_CD8<16, CD8VT1>;
+ "{q}", HasFP16>, REX_W, T_MAP5, XS, EVEX_CD8<16, CD8VT1>;
defm VCVTTSH2USIZ: avx512_cvt_s_all<0x78, "vcvttsh2usi", f16x_info, i32x_info,
any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I,
- "{l}", HasFP16>, T_MAP5XS, EVEX_CD8<16, CD8VT1>;
+ "{l}", HasFP16>, T_MAP5, XS, EVEX_CD8<16, CD8VT1>;
defm VCVTTSH2USI64Z: avx512_cvt_s_all<0x78, "vcvttsh2usi", f16x_info, i64x_info,
any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I,
- "{q}", HasFP16>, T_MAP5XS, REX_W, EVEX_CD8<16, CD8VT1>;
+ "{q}", HasFP16>, T_MAP5, XS, REX_W, EVEX_CD8<16, CD8VT1>;
let Predicates = [HasFP16] in {
defm VCVTSI2SHZ : avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd, WriteCvtI2SS, GR32,
v8f16x_info, i32mem, loadi32, "cvtsi2sh", "l">,
- T_MAP5XS, EVEX_CD8<32, CD8VT1>;
+ T_MAP5, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTSI642SHZ: avx512_vcvtsi_common<0x2A, X86SintToFp, X86SintToFpRnd, WriteCvtI2SS, GR64,
v8f16x_info, i64mem, loadi64, "cvtsi2sh","q">,
- T_MAP5XS, REX_W, EVEX_CD8<64, CD8VT1>;
+ T_MAP5, XS, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCVTUSI2SHZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd, WriteCvtI2SS, GR32,
v8f16x_info, i32mem, loadi32,
- "cvtusi2sh","l">, T_MAP5XS, EVEX_CD8<32, CD8VT1>;
+ "cvtusi2sh","l">, T_MAP5, XS, EVEX_CD8<32, CD8VT1>;
defm VCVTUSI642SHZ : avx512_vcvtsi_common<0x7B, X86UintToFp, X86UintToFpRnd, WriteCvtI2SS, GR64,
v8f16x_info, i64mem, loadi64, "cvtusi2sh", "q">,
- T_MAP5XS, REX_W, EVEX_CD8<64, CD8VT1>;
+ T_MAP5, XS, REX_W, EVEX_CD8<64, CD8VT1>;
def : InstAlias<"vcvtsi2sh\t{$src, $src1, $dst|$dst, $src1, $src}",
(VCVTSI2SHZrm_Int VR128X:$dst, VR128X:$src1, i32mem:$src), 0, "att">;
@@ -13446,14 +13446,14 @@ multiclass avx512_cfmulop_common<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Uses = [MXCSR] in {
defm VFMADDCPH : avx512_cfmaop_common<0x56, "vfmaddcph", x86vfmaddc, x86vfmaddcRnd, 1>,
- T_MAP6XS, EVEX_CD8<32, CD8VF>;
+ T_MAP6, XS, EVEX_CD8<32, CD8VF>;
defm VFCMADDCPH : avx512_cfmaop_common<0x56, "vfcmaddcph", x86vfcmaddc, x86vfcmaddcRnd, 0>,
- T_MAP6XD, EVEX_CD8<32, CD8VF>;
+ T_MAP6, XD, EVEX_CD8<32, CD8VF>;
defm VFMULCPH : avx512_cfmulop_common<0xD6, "vfmulcph", x86vfmulc, x86vfmulc,
- x86vfmulcRnd, 1>, T_MAP6XS, EVEX_CD8<32, CD8VF>;
+ x86vfmulcRnd, 1>, T_MAP6, XS, EVEX_CD8<32, CD8VF>;
defm VFCMULCPH : avx512_cfmulop_common<0xD6, "vfcmulcph", x86vfcmulc,
- x86vfcmulc, x86vfcmulcRnd, 0>, T_MAP6XD, EVEX_CD8<32, CD8VF>;
+ x86vfcmulc, x86vfcmulcRnd, 0>, T_MAP6, XD, EVEX_CD8<32, CD8VF>;
}
@@ -13504,12 +13504,12 @@ multiclass avx512_cfmbinop_sh_common<bits<8> opc, string OpcodeStr, SDNode OpNod
let Uses = [MXCSR] in {
defm VFMADDCSHZ : avx512_cfmaop_sh_common<0x57, "vfmaddcsh", x86vfmaddcSh, x86vfmaddcShRnd, 1>,
- T_MAP6XS, EVEX_CD8<32, CD8VT1>, EVEX_V128, EVEX, VVVV;
+ T_MAP6, XS, EVEX_CD8<32, CD8VT1>, EVEX_V128, EVEX, VVVV;
defm VFCMADDCSHZ : avx512_cfmaop_sh_common<0x57, "vfcmaddcsh", x86vfcmaddcSh, x86vfcmaddcShRnd, 0>,
- T_MAP6XD, EVEX_CD8<32, CD8VT1>, EVEX_V128, EVEX, VVVV;
+ T_MAP6, XD, EVEX_CD8<32, CD8VT1>, EVEX_V128, EVEX, VVVV;
defm VFMULCSHZ : avx512_cfmbinop_sh_common<0xD7, "vfmulcsh", x86vfmulcSh, x86vfmulcShRnd, 1>,
- T_MAP6XS, EVEX_CD8<32, CD8VT1>, EVEX_V128, VEX_LIG, EVEX, VVVV;
+ T_MAP6, XS, EVEX_CD8<32, CD8VT1>, EVEX_V128, VEX_LIG, EVEX, VVVV;
defm VFCMULCSHZ : avx512_cfmbinop_sh_common<0xD7, "vfcmulcsh", x86vfcmulcSh, x86vfcmulcShRnd, 0>,
- T_MAP6XD, EVEX_CD8<32, CD8VT1>, EVEX_V128, VEX_LIG, EVEX, VVVV;
+ T_MAP6, XD, EVEX_CD8<32, CD8VT1>, EVEX_V128, VEX_LIG, EVEX, VVVV;
}
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
index 6f4b69c9b5c9ff..4fb05231010d8b 100644
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -1117,8 +1117,8 @@ let Predicates = [HasBMI, HasEGPR, In64BitMode] in {
// Complexity is reduced to give and with immediate a chance to match first.
let Defs = [EFLAGS], AddedComplexity = -6 in {
- defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32, WriteALU>, T8PS;
- defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64, WriteALU>, T8PS, REX_W;
+ defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32, WriteALU>, T8, PS;
+ defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64, WriteALU>, T8, PS, REX_W;
}
let Predicates = [HasBMI], AddedComplexity = -6 in {
@@ -1141,12 +1141,12 @@ let hasSideEffects = 0 in {
let Predicates = [HasBMI2, NoEGPR] in {
def rr : I<0xF6, MRMSrcReg, (outs RC:$dst1, RC:$dst2), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
- []>, T8XD, VEX, VVVV, Sched<[WriteIMulH, sched]>;
+ []>, T8, XD, VEX, VVVV, Sched<[WriteIMulH, sched]>;
let mayLoad = 1 in
def rm : I<0xF6, MRMSrcMem, (outs RC:$dst1, RC:$dst2), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
- []>, T8XD, VEX, VVVV,
+ []>, T8, XD, VEX, VVVV,
Sched<[WriteIMulHLd, sched.Folded,
// Memory operand.
ReadDefault, ReadDefault, ReadDefault, ReadDefault, ReadDefault,
@@ -1165,11 +1165,11 @@ let Predicates = [HasBMI2, NoEGPR] in {
let Predicates = [HasBMI2, HasEGPR, In64BitMode] in
def rr#_EVEX : I<0xF6, MRMSrcReg, (outs RC:$dst1, RC:$dst2), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
- []>, T8XD, EVEX, VVVV, Sched<[WriteIMulH, sched]>;
+ []>, T8, XD, EVEX, VVVV, Sched<[WriteIMulH, sched]>;
let Predicates = [HasBMI2, HasEGPR, In64BitMode], mayLoad = 1 in
def rm#_EVEX : I<0xF6, MRMSrcMem, (outs RC:$dst1, RC:$dst2), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst2, $dst1|$dst1, $dst2, $src}"),
- []>, T8XD, EVEX, VVVV,
+ []>, T8, XD, EVEX, VVVV,
Sched<[WriteIMulHLd, sched.Folded,
// Memory operand.
ReadDefault, ReadDefault, ReadDefault, ReadDefault, ReadDefault,
@@ -1201,12 +1201,12 @@ class ADCOXOpRM <string m, X86TypeInfo t>
let OpSize = OpSizeFixed, Constraints = "$src1 = $dst",
Predicates = [HasADX] in {
-def ADCX32rr : ADCOXOpRR<"adcx", Xi32>, T8PD;
-def ADCX64rr : ADCOXOpRR<"adcx", Xi64>, T8PD;
-def ADOX32rr : ADCOXOpRR<"adox", Xi32>, T8XS;
-def ADOX64rr : ADCOXOpRR<"adox", Xi64>, T8XS;
-def ADCX32rm : ADCOXOpRM<"adcx", Xi32>, T8PD;
-def ADCX64rm : ADCOXOpRM<"adcx", Xi64>, T8PD;
-def ADOX32rm : ADCOXOpRM<"adox", Xi32>, T8XS;
-def ADOX64rm : ADCOXOpRM<"adox", Xi64>, T8XS;
+def ADCX32rr : ADCOXOpRR<"adcx", Xi32>, T8, PD;
+def ADCX64rr : ADCOXOpRR<"adcx", Xi64>, T8, PD;
+def ADOX32rr : ADCOXOpRR<"adox", Xi32>, T8, XS;
+def ADOX64rr : ADCOXOpRR<"adox", Xi64>, T8, XS;
+def ADCX32rm : ADCOXOpRM<"adcx", Xi32>, T8, PD;
+def ADCX64rm : ADCOXOpRM<"adcx", Xi64>, T8, PD;
+def ADOX32rm : ADCOXOpRM<"adox", Xi32>, T8, XS;
+def ADOX64rm : ADCOXOpRM<"adox", Xi64>, T8, XS;
}
diff --git a/llvm/lib/Target/X86/X86InstrFPStack.td b/llvm/lib/Target/X86/X86InstrFPStack.td
index 09655d93912112..dd63e921b8acdf 100644
--- a/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -666,20 +666,20 @@ def FCOMPP : I<0xDE, MRM_D9, (outs), (ins), "fcompp", []>;
let Uses = [FPSW, FPCW] in {
def FXSAVE : I<0xAE, MRM0m, (outs), (ins opaquemem:$dst),
- "fxsave\t$dst", [(int_x86_fxsave addr:$dst)]>, PS,
+ "fxsave\t$dst", [(int_x86_fxsave addr:$dst)]>, TB, PS,
Requires<[HasFXSR]>;
def FXSAVE64 : RI<0xAE, MRM0m, (outs), (ins opaquemem:$dst),
"fxsave64\t$dst", [(int_x86_fxsave64 addr:$dst)]>,
- PS, Requires<[HasFXSR, In64BitMode]>;
+ TB, PS, Requires<[HasFXSR, In64BitMode]>;
} // Uses = [FPSW, FPCW]
let Defs = [FPSW, FPCW] in {
def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaquemem:$src),
"fxrstor\t$src", [(int_x86_fxrstor addr:$src)]>,
- PS, Requires<[HasFXSR]>;
+ TB, PS, Requires<[HasFXSR]>;
def FXRSTOR64 : RI<0xAE, MRM1m, (outs), (ins opaquemem:$src),
"fxrstor64\t$src", [(int_x86_fxrstor64 addr:$src)]>,
- PS, Requires<[HasFXSR, In64BitMode]>;
+ TB, PS, Requires<[HasFXSR, In64BitMode]>;
} // Defs = [FPSW, FPCW]
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrKL.td b/llvm/lib/Target/X86/X86InstrKL.td
index a3392b691c0a25..4586fc541627fe 100644
--- a/llvm/lib/Target/X86/X86InstrKL.td
+++ b/llvm/lib/Target/X86/X86InstrKL.td
@@ -19,17 +19,17 @@ let SchedRW = [WriteSystem], Predicates = [HasKL] in {
let Uses = [XMM0, EAX], Defs = [EFLAGS] in {
def LOADIWKEY : I<0xDC, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"loadiwkey\t{$src2, $src1|$src1, $src2}",
- [(int_x86_loadiwkey XMM0, VR128:$src1, VR128:$src2, EAX)]>, T8XS;
+ [(int_x86_loadiwkey XMM0, VR128:$src1, VR128:$src2, EAX)]>, T8, XS;
}
let Uses = [XMM0], Defs = [XMM0, XMM1, XMM2, XMM4, XMM5, XMM6, EFLAGS] in {
def ENCODEKEY128 : I<0xFA, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "encodekey128\t{$src, $dst|$dst, $src}", []>, T8XS;
+ "encodekey128\t{$src, $dst|$dst, $src}", []>, T8, XS;
}
let Uses = [XMM0, XMM1], Defs = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, EFLAGS] in {
def ENCODEKEY256 : I<0xFB, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "encodekey256\t{$src, $dst|$dst, $src}", []>, T8XS;
+ "encodekey256\t{$src, $dst|$dst, $src}", []>, T8, XS;
}
let Constraints = "$src1 = $dst",
@@ -37,22 +37,22 @@ let SchedRW = [WriteSystem], Predicates = [HasKL] in {
def AESENC128KL : I<0xDC, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
"aesenc128kl\t{$src2, $src1|$src1, $src2}",
[(set VR128:$dst, EFLAGS,
- (X86aesenc128kl VR128:$src1, addr:$src2))]>, T8XS;
+ (X86aesenc128kl VR128:$src1, addr:$src2))]>, T8, XS;
def AESDEC128KL : I<0xDD, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
"aesdec128kl\t{$src2, $src1|$src1, $src2}",
[(set VR128:$dst, EFLAGS,
- (X86aesdec128kl VR128:$src1, addr:$src2))]>, T8XS;
+ (X86aesdec128kl VR128:$src1, addr:$src2))]>, T8, XS;
def AESENC256KL : I<0xDE, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
"aesenc256kl\t{$src2, $src1|$src1, $src2}",
[(set VR128:$dst, EFLAGS,
- (X86aesenc256kl VR128:$src1, addr:$src2))]>, T8XS;
+ (X86aesenc256kl VR128:$src1, addr:$src2))]>, T8, XS;
def AESDEC256KL : I<0xDF, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, opaquemem:$src2),
"aesdec256kl\t{$src2, $src1|$src1, $src2}",
[(set VR128:$dst, EFLAGS,
- (X86aesdec256kl VR128:$src1, addr:$src2))]>, T8XS;
+ (X86aesdec256kl VR128:$src1, addr:$src2))]>, T8, XS;
}
} // SchedRW, Predicates
@@ -62,13 +62,13 @@ let SchedRW = [WriteSystem], Predicates = [HasWIDEKL] in {
Defs = [EFLAGS, XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7],
mayLoad = 1 in {
def AESENCWIDE128KL : I<0xD8, MRM0m, (outs), (ins opaquemem:$src),
- "aesencwide128kl\t$src", []>, T8XS;
+ "aesencwide128kl\t$src", []>, T8, XS;
def AESDECWIDE128KL : I<0xD8, MRM1m, (outs), (ins opaquemem:$src),
- "aesdecwide128kl\t$src", []>, T8XS;
+ "aesdecwide128kl\t$src", []>, T8, XS;
def AESENCWIDE256KL : I<0xD8, MRM2m, (outs), (ins opaquemem:$src),
- "aesencwide256kl\t$src", []>, T8XS;
+ "aesencwide256kl\t$src", []>, T8, XS;
def AESDECWIDE256KL : I<0xD8, MRM3m, (outs), (ins opaquemem:$src),
- "aesdecwide256kl\t$src", []>, T8XS;
+ "aesdecwide256kl\t$src", []>, T8, XS;
}
} // SchedRW, Predicates
diff --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td
index 9796379aa0bf03..8d472ccd52df38 100644
--- a/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/llvm/lib/Target/X86/X86InstrMMX.td
@@ -487,24 +487,24 @@ def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
// -- Conversion Instructions
defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
- WriteCvtPS2I, SSEPackedSingle>, PS, SIMD_EXC;
+ WriteCvtPS2I, SSEPackedSingle>, TB, PS, SIMD_EXC;
defm MMX_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
- WriteCvtPD2I, SSEPackedDouble>, PD, SIMD_EXC;
+ WriteCvtPD2I, SSEPackedDouble>, TB, PD, SIMD_EXC;
defm MMX_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
- WriteCvtPS2I, SSEPackedSingle>, PS, SIMD_EXC;
+ WriteCvtPS2I, SSEPackedSingle>, TB, PS, SIMD_EXC;
defm MMX_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
- WriteCvtPD2I, SSEPackedDouble>, PD, SIMD_EXC;
+ WriteCvtPD2I, SSEPackedDouble>, TB, PD, SIMD_EXC;
defm MMX_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
- WriteCvtI2PD, SSEPackedDouble>, PD;
+ WriteCvtI2PD, SSEPackedDouble>, TB, PD;
let Constraints = "$src1 = $dst" in {
defm MMX_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
int_x86_sse_cvtpi2ps,
i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, PS, SIMD_EXC;
+ SSEPackedSingle>, TB, PS, SIMD_EXC;
}
// Extract / Insert
diff --git a/llvm/lib/Target/X86/X86InstrMisc.td b/llvm/lib/Target/X86/X86InstrMisc.td
index d3a3fb7fefc232..779f27085eae0c 100644
--- a/llvm/lib/Target/X86/X86InstrMisc.td
+++ b/llvm/lib/Target/X86/X86InstrMisc.td
@@ -165,10 +165,10 @@ def POPP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "popp\t$reg", []>,
REX_W, ExplicitREX2Prefix, Requires<[In64BitMode]>;
def POP2: I<0x8F, MRM0r, (outs GR64:$reg1, GR64:$reg2), (ins),
"pop2\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4PS;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4, PS;
def POP2P: I<0x8F, MRM0r, (outs GR64:$reg1, GR64:$reg2), (ins),
"pop2p\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4PS, REX_W;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4, PS, REX_W;
} // mayLoad, SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
@@ -186,10 +186,10 @@ def PUSHP64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "pushp\t$reg", []>,
REX_W, ExplicitREX2Prefix, Requires<[In64BitMode]>;
def PUSH2: I<0xFF, MRM6r, (outs), (ins GR64:$reg1, GR64:$reg2),
"push2\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4PS;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4, PS;
def PUSH2P: I<0xFF, MRM6r, (outs), (ins GR64:$reg1, GR64:$reg2),
"push2p\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4PS, REX_W;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4, PS, REX_W;
} // mayStore, SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
@@ -251,52 +251,52 @@ let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
- PS, OpSize16, Sched<[WriteBSF]>;
+ TB, PS, OpSize16, Sched<[WriteBSF]>;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
- PS, OpSize16, Sched<[WriteBSFLd]>;
+ TB, PS, OpSize16, Sched<[WriteBSFLd]>;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
- PS, OpSize32, Sched<[WriteBSF]>;
+ TB, PS, OpSize32, Sched<[WriteBSF]>;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
- PS, OpSize32, Sched<[WriteBSFLd]>;
+ TB, PS, OpSize32, Sched<[WriteBSFLd]>;
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
- PS, Sched<[WriteBSF]>;
+ TB, PS, Sched<[WriteBSF]>;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
- PS, Sched<[WriteBSFLd]>;
+ TB, PS, Sched<[WriteBSFLd]>;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
- PS, OpSize16, Sched<[WriteBSR]>;
+ TB, PS, OpSize16, Sched<[WriteBSR]>;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
- PS, OpSize16, Sched<[WriteBSRLd]>;
+ TB, PS, OpSize16, Sched<[WriteBSRLd]>;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
- PS, OpSize32, Sched<[WriteBSR]>;
+ TB, PS, OpSize32, Sched<[WriteBSR]>;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
- PS, OpSize32, Sched<[WriteBSRLd]>;
+ TB, PS, OpSize32, Sched<[WriteBSRLd]>;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
- PS, Sched<[WriteBSR]>;
+ TB, PS, Sched<[WriteBSR]>;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
- PS, Sched<[WriteBSRLd]>;
+ TB, PS, Sched<[WriteBSRLd]>;
} // Defs = [EFLAGS]
let SchedRW = [WriteMicrocoded] in {
@@ -1095,29 +1095,29 @@ let Predicates = [HasMOVBE] in {
def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
- OpSize16, T8PS;
+ OpSize16, T8, PS;
def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
- OpSize32, T8PS;
+ OpSize32, T8, PS;
def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
- T8PS;
+ T8, PS;
}
let SchedRW = [WriteStore] in {
def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(store (bswap GR16:$src), addr:$dst)]>,
- OpSize16, T8PS;
+ OpSize16, T8, PS;
def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(store (bswap GR32:$src), addr:$dst)]>,
- OpSize32, T8PS;
+ OpSize32, T8, PS;
def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(store (bswap GR64:$src), addr:$dst)]>,
- T8PS;
+ T8, PS;
}
}
@@ -1127,13 +1127,13 @@ let Predicates = [HasMOVBE] in {
let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
"rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
- OpSize16, PS;
+ OpSize16, TB, PS;
def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
"rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
- OpSize32, PS;
+ OpSize32, TB, PS;
def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
"rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
- PS;
+ TB, PS;
}
//===----------------------------------------------------------------------===//
@@ -1141,11 +1141,11 @@ let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
//
let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
+ [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB, PS;
def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
+ [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, TB, PS;
def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
- [(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
+ [(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB, PS;
}
//===----------------------------------------------------------------------===//
@@ -1155,29 +1155,29 @@ let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"lzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
- XS, OpSize16, Sched<[WriteLZCNT]>;
+ TB, XS, OpSize16, Sched<[WriteLZCNT]>;
def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"lzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctlz (loadi16 addr:$src))),
- (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
+ (implicit EFLAGS)]>, TB, XS, OpSize16, Sched<[WriteLZCNTLd]>;
def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"lzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
- XS, OpSize32, Sched<[WriteLZCNT]>;
+ TB, XS, OpSize32, Sched<[WriteLZCNT]>;
def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"lzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctlz (loadi32 addr:$src))),
- (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
+ (implicit EFLAGS)]>, TB, XS, OpSize32, Sched<[WriteLZCNTLd]>;
def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"lzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
- XS, Sched<[WriteLZCNT]>;
+ TB, XS, Sched<[WriteLZCNT]>;
def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"lzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctlz (loadi64 addr:$src))),
- (implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
+ (implicit EFLAGS)]>, TB, XS, Sched<[WriteLZCNTLd]>;
}
//===----------------------------------------------------------------------===//
@@ -1187,29 +1187,29 @@ let Predicates = [HasBMI], Defs = [EFLAGS] in {
def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"tzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
- XS, OpSize16, Sched<[WriteTZCNT]>;
+ TB, XS, OpSize16, Sched<[WriteTZCNT]>;
def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"tzcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (cttz (loadi16 addr:$src))),
- (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
+ (implicit EFLAGS)]>, TB, XS, OpSize16, Sched<[WriteTZCNTLd]>;
def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"tzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
- XS, OpSize32, Sched<[WriteTZCNT]>;
+ TB, XS, OpSize32, Sched<[WriteTZCNT]>;
def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"tzcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (cttz (loadi32 addr:$src))),
- (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
+ (implicit EFLAGS)]>, TB, XS, OpSize32, Sched<[WriteTZCNTLd]>;
def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"tzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
- XS, Sched<[WriteTZCNT]>;
+ TB, XS, Sched<[WriteTZCNT]>;
def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"tzcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (cttz (loadi64 addr:$src))),
- (implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
+ (implicit EFLAGS)]>, TB, XS, Sched<[WriteTZCNTLd]>;
}
multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
@@ -1218,11 +1218,11 @@ multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
let hasSideEffects = 0 in {
def rr#Suffix : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
- T8PS, VEX, VVVV, Sched<[sched]>;
+ T8, PS, VEX, VVVV, Sched<[sched]>;
let mayLoad = 1 in
def rm#Suffix : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
- T8PS, VEX, VVVV, Sched<[sched.Folded]>;
+ T8, PS, VEX, VVVV, Sched<[sched.Folded]>;
}
}
@@ -1288,12 +1288,12 @@ multiclass bmi4VOp3_base<bits<8> opc, string mnemonic, RegisterClass RC,
def rr#Suffix : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
- T8PS, VEX, Sched<[Sched]>;
+ T8, PS, VEX, Sched<[Sched]>;
let mayLoad = 1 in
def rm#Suffix : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
- (implicit EFLAGS)]>, T8PS, VEX,
+ (implicit EFLAGS)]>, T8, PS, VEX,
Sched<[Sched.Folded,
// x86memop:$src1
ReadDefault, ReadDefault, ReadDefault, ReadDefault,
@@ -1380,24 +1380,24 @@ multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
let Predicates = [HasBMI2, NoEGPR] in {
defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
- X86pdep, loadi32>, T8XD;
+ X86pdep, loadi32>, T8, XD;
defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
- X86pdep, loadi64>, T8XD, REX_W;
+ X86pdep, loadi64>, T8, XD, REX_W;
defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
- X86pext, loadi32>, T8XS;
+ X86pext, loadi32>, T8, XS;
defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
- X86pext, loadi64>, T8XS, REX_W;
+ X86pext, loadi64>, T8, XS, REX_W;
}
let Predicates = [HasBMI2, HasEGPR] in {
defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
- X86pdep, loadi32, "_EVEX">, T8XD, EVEX;
+ X86pdep, loadi32, "_EVEX">, T8, XD, EVEX;
defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
- X86pdep, loadi64, "_EVEX">, T8XD, REX_W, EVEX;
+ X86pdep, loadi64, "_EVEX">, T8, XD, REX_W, EVEX;
defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
- X86pext, loadi32, "_EVEX">, T8XS, EVEX;
+ X86pext, loadi32, "_EVEX">, T8, XS, EVEX;
defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
- X86pext, loadi64, "_EVEX">, T8XS, REX_W, EVEX;
+ X86pext, loadi64, "_EVEX">, T8, XS, REX_W, EVEX;
}
//===----------------------------------------------------------------------===//
@@ -1471,22 +1471,22 @@ let SchedRW = [ WriteSystem ] in {
let SchedRW = [WriteSystem] in {
def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
"umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
- XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
+ TB, XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
"umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
- XS, AdSize32, Requires<[HasWAITPKG]>;
+ TB, XS, AdSize32, Requires<[HasWAITPKG]>;
def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
"umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
- XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
+ TB, XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
let Uses = [EAX, EDX], Defs = [EFLAGS] in {
def UMWAIT : I<0xAE, MRM6r,
(outs), (ins GR32orGR64:$src), "umwait\t$src",
[(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
- XD, Requires<[HasWAITPKG]>;
+ TB, XD, Requires<[HasWAITPKG]>;
def TPAUSE : I<0xAE, MRM6r,
(outs), (ins GR32orGR64:$src), "tpause\t$src",
[(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
- PD, Requires<[HasWAITPKG]>;
+ TB, PD, Requires<[HasWAITPKG]>;
}
} // SchedRW
@@ -1497,19 +1497,19 @@ let SchedRW = [WriteStore] in {
def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore32 addr:$dst, GR32:$src)]>,
- T8PS, Requires<[HasMOVDIRI, NoEGPR]>;
+ T8, PS, Requires<[HasMOVDIRI, NoEGPR]>;
def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore64 addr:$dst, GR64:$src)]>,
- T8PS, Requires<[In64BitMode, HasMOVDIRI, NoEGPR]>;
+ T8, PS, Requires<[In64BitMode, HasMOVDIRI, NoEGPR]>;
def MOVDIRI32_EVEX : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore32 addr:$dst, GR32:$src)]>,
- EVEX, NoCD8, T_MAP4PS, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
+ EVEX, NoCD8, T_MAP4, PS, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
def MOVDIRI64_EVEX : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore64 addr:$dst, GR64:$src)]>,
- EVEX, NoCD8, T_MAP4PS, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
+ EVEX, NoCD8, T_MAP4, PS, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1518,23 +1518,23 @@ def MOVDIRI64_EVEX : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
let SchedRW = [WriteStore] in {
def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem_GR16:$src),
"movdir64b\t{$src, $dst|$dst, $src}", []>,
- T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
+ T8, PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem_GR32:$src),
"movdir64b\t{$src, $dst|$dst, $src}",
[(int_x86_movdir64b GR32:$dst, addr:$src)]>,
- T8PD, AdSize32, Requires<[HasMOVDIR64B, NoEGPR]>;
+ T8, PD, AdSize32, Requires<[HasMOVDIR64B, NoEGPR]>;
def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem_GR64:$src),
"movdir64b\t{$src, $dst|$dst, $src}",
[(int_x86_movdir64b GR64:$dst, addr:$src)]>,
- T8PD, AdSize64, Requires<[HasMOVDIR64B, NoEGPR, In64BitMode]>;
+ T8, PD, AdSize64, Requires<[HasMOVDIR64B, NoEGPR, In64BitMode]>;
def MOVDIR64B32_EVEX : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem_GR32:$src),
"movdir64b\t{$src, $dst|$dst, $src}",
[(int_x86_movdir64b GR32:$dst, addr:$src)]>,
- EVEX, NoCD8, T_MAP4PD, AdSize32, Requires<[HasMOVDIR64B, HasEGPR, In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, PD, AdSize32, Requires<[HasMOVDIR64B, HasEGPR, In64BitMode]>;
def MOVDIR64B64_EVEX : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem_GR64:$src),
"movdir64b\t{$src, $dst|$dst, $src}",
[(int_x86_movdir64b GR64:$dst, addr:$src)]>,
- EVEX, NoCD8, T_MAP4PD, AdSize64, Requires<[HasMOVDIR64B, HasEGPR, In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, PD, AdSize64, Requires<[HasMOVDIR64B, HasEGPR, In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1544,28 +1544,28 @@ let SchedRW = [WriteStore], Defs = [EFLAGS] in {
def ENQCMD16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
"enqcmd\t{$src, $dst|$dst, $src}",
[(set EFLAGS, (X86enqcmd GR16:$dst, addr:$src))]>,
- T8XD, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
+ T8, XD, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
def ENQCMD32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
"enqcmd\t{$src, $dst|$dst, $src}",
[(set EFLAGS, (X86enqcmd GR32:$dst, addr:$src))]>,
- T8XD, AdSize32, Requires<[HasENQCMD]>;
+ T8, XD, AdSize32, Requires<[HasENQCMD]>;
def ENQCMD64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
"enqcmd\t{$src, $dst|$dst, $src}",
[(set EFLAGS, (X86enqcmd GR64:$dst, addr:$src))]>,
- T8XD, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
+ T8, XD, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
def ENQCMDS16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
"enqcmds\t{$src, $dst|$dst, $src}",
[(set EFLAGS, (X86enqcmds GR16:$dst, addr:$src))]>,
- T8XS, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
+ T8, XS, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
def ENQCMDS32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
"enqcmds\t{$src, $dst|$dst, $src}",
[(set EFLAGS, (X86enqcmds GR32:$dst, addr:$src))]>,
- T8XS, AdSize32, Requires<[HasENQCMD]>;
+ T8, XS, AdSize32, Requires<[HasENQCMD]>;
def ENQCMDS64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
"enqcmds\t{$src, $dst|$dst, $src}",
[(set EFLAGS, (X86enqcmds GR64:$dst, addr:$src))]>,
- T8XS, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
+ T8, XS, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
}
//===----------------------------------------------------------------------===//
@@ -1588,11 +1588,11 @@ let SchedRW = [WriteSystem] in {
let Uses = [EAX, EDX] in
def INVLPGB32 : I<0x01, MRM_FE, (outs), (ins),
"invlpgb", []>,
- PS, Requires<[Not64BitMode]>;
+ TB, PS, Requires<[Not64BitMode]>;
let Uses = [RAX, EDX] in
def INVLPGB64 : I<0x01, MRM_FE, (outs), (ins),
"invlpgb", []>,
- PS, Requires<[In64BitMode]>;
+ TB, PS, Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1602,7 +1602,7 @@ let SchedRW = [WriteSystem] in {
let SchedRW = [WriteSystem] in {
def TLBSYNC : I<0x01, MRM_FF, (outs), (ins),
"tlbsync", []>,
- PS, Requires<[]>;
+ TB, PS, Requires<[]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1610,14 +1610,14 @@ let SchedRW = [WriteSystem] in {
//
let Uses = [EAX], SchedRW = [WriteSystem] in
def HRESET : Ii8<0xF0, MRM_C0, (outs), (ins i32u8imm:$imm), "hreset\t$imm", []>,
- Requires<[HasHRESET]>, TAXS;
+ Requires<[HasHRESET]>, TA, XS;
//===----------------------------------------------------------------------===//
// SERIALIZE Instruction
//
let SchedRW = [WriteSystem] in
def SERIALIZE : I<0x01, MRM_E8, (outs), (ins), "serialize",
- [(int_x86_serialize)]>, PS,
+ [(int_x86_serialize)]>, TB, PS,
Requires<[HasSERIALIZE]>;
//===----------------------------------------------------------------------===//
@@ -1625,9 +1625,9 @@ let SchedRW = [WriteSystem] in
//
let Predicates = [HasTSXLDTRK], SchedRW = [WriteSystem] in {
def XSUSLDTRK : I<0x01, MRM_E8, (outs), (ins), "xsusldtrk",
- [(int_x86_xsusldtrk)]>, XD;
+ [(int_x86_xsusldtrk)]>, TB, XD;
def XRESLDTRK : I<0x01, MRM_E9, (outs), (ins), "xresldtrk",
- [(int_x86_xresldtrk)]>, XD;
+ [(int_x86_xresldtrk)]>, TB, XD;
}
//===----------------------------------------------------------------------===//
@@ -1635,18 +1635,18 @@ let Predicates = [HasTSXLDTRK], SchedRW = [WriteSystem] in {
//
let Predicates = [HasUINTR, In64BitMode], SchedRW = [WriteSystem] in {
def UIRET : I<0x01, MRM_EC, (outs), (ins), "uiret",
- []>, XS;
+ []>, TB, XS;
def CLUI : I<0x01, MRM_EE, (outs), (ins), "clui",
- [(int_x86_clui)]>, XS;
+ [(int_x86_clui)]>, TB, XS;
def STUI : I<0x01, MRM_EF, (outs), (ins), "stui",
- [(int_x86_stui)]>, XS;
+ [(int_x86_stui)]>, TB, XS;
def SENDUIPI : I<0xC7, MRM6r, (outs), (ins GR64:$arg), "senduipi\t$arg",
- [(int_x86_senduipi GR64:$arg)]>, XS;
+ [(int_x86_senduipi GR64:$arg)]>, TB, XS;
let Defs = [EFLAGS] in
def TESTUI : I<0x01, MRM_ED, (outs), (ins), "testui",
- [(set EFLAGS, (X86testui))]>, XS;
+ [(set EFLAGS, (X86testui))]>, TB, XS;
}
//===----------------------------------------------------------------------===//
@@ -1670,14 +1670,14 @@ def CMPCCXADDmr32 : I<0xe0, MRMDestMem4VOp3CC, (outs GR32:$dst),
"cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
[(set GR32:$dst, (X86cmpccxadd addr:$dstsrc2,
GR32:$dstsrc1, GR32:$src3, timm:$cond))]>,
- VEX, VVVV, T8PD, Sched<[WriteXCHG]>;
+ VEX, VVVV, T8, PD, Sched<[WriteXCHG]>;
def CMPCCXADDmr64 : I<0xe0, MRMDestMem4VOp3CC, (outs GR64:$dst),
(ins GR64:$dstsrc1, i64mem:$dstsrc2, GR64:$src3, ccode:$cond),
"cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
[(set GR64:$dst, (X86cmpccxadd addr:$dstsrc2,
GR64:$dstsrc1, GR64:$src3, timm:$cond))]>,
- VEX, VVVV, REX_W, T8PD, Sched<[WriteXCHG]>;
+ VEX, VVVV, REX_W, T8, PD, Sched<[WriteXCHG]>;
}
let Predicates = [HasCMPCCXADD, HasEGPR, In64BitMode] in {
@@ -1686,14 +1686,14 @@ def CMPCCXADDmr32_EVEX : I<0xe0, MRMDestMem4VOp3CC, (outs GR32:$dst),
"cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
[(set GR32:$dst, (X86cmpccxadd addr:$dstsrc2,
GR32:$dstsrc1, GR32:$src3, timm:$cond))]>,
- EVEX, VVVV, NoCD8, T8PD, Sched<[WriteXCHG]>;
+ EVEX, VVVV, NoCD8, T8, PD, Sched<[WriteXCHG]>;
def CMPCCXADDmr64_EVEX : I<0xe0, MRMDestMem4VOp3CC, (outs GR64:$dst),
(ins GR64:$dstsrc1, i64mem:$dstsrc2, GR64:$src3, ccode:$cond),
"cmp${cond}xadd\t{$src3, $dst, $dstsrc2|$dstsrc2, $dst, $src3}",
[(set GR64:$dst, (X86cmpccxadd addr:$dstsrc2,
GR64:$dstsrc1, GR64:$src3, timm:$cond))]>,
- EVEX, VVVV, NoCD8, REX_W, T8PD, Sched<[WriteXCHG]>;
+ EVEX, VVVV, NoCD8, REX_W, T8, PD, Sched<[WriteXCHG]>;
}
}
@@ -1703,12 +1703,12 @@ def CMPCCXADDmr64_EVEX : I<0xe0, MRMDestMem4VOp3CC, (outs GR64:$dst),
let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
- "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
+ "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, TB, PD;
let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
- [(int_x86_clwb addr:$src)]>, PD;
+ [(int_x86_clwb addr:$src)]>, TB, PD;
let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
- [(int_x86_cldemote addr:$src)]>, PS;
+ [(int_x86_cldemote addr:$src)]>, TB, PS;
diff --git a/llvm/lib/Target/X86/X86InstrRAOINT.td b/llvm/lib/Target/X86/X86InstrRAOINT.td
index dc0e267a83e39b..601355d4f7de4e 100644
--- a/llvm/lib/Target/X86/X86InstrRAOINT.td
+++ b/llvm/lib/Target/X86/X86InstrRAOINT.td
@@ -39,7 +39,7 @@ multiclass RAOINT_BASE<string OpcodeStr> {
Sched<[WriteALURMW]>, REX_W;
}
-defm AADD : RAOINT_BASE<"add">, T8PS;
-defm AAND : RAOINT_BASE<"and">, T8PD;
-defm AOR : RAOINT_BASE<"or" >, T8XD;
-defm AXOR : RAOINT_BASE<"xor">, T8XS;
+defm AADD : RAOINT_BASE<"add">, T8, PS;
+defm AAND : RAOINT_BASE<"and">, T8, PD;
+defm AOR : RAOINT_BASE<"or" >, T8, XD;
+defm AXOR : RAOINT_BASE<"xor">, T8, XS;
diff --git a/llvm/lib/Target/X86/X86InstrSGX.td b/llvm/lib/Target/X86/X86InstrSGX.td
index 6439f717accb96..3c8d6e3c6b6b33 100644
--- a/llvm/lib/Target/X86/X86InstrSGX.td
+++ b/llvm/lib/Target/X86/X86InstrSGX.td
@@ -17,13 +17,13 @@
let SchedRW = [WriteSystem], Predicates = [HasSGX] in {
// ENCLS - Execute an Enclave System Function of Specified Leaf Number
def ENCLS : I<0x01, MRM_CF, (outs), (ins),
- "encls", []>, PS;
+ "encls", []>, TB, PS;
// ENCLU - Execute an Enclave User Function of Specified Leaf Number
def ENCLU : I<0x01, MRM_D7, (outs), (ins),
- "enclu", []>, PS;
+ "enclu", []>, TB, PS;
// ENCLV - Execute an Enclave VMM Function of Specified Leaf Number
def ENCLV : I<0x01, MRM_C0, (outs), (ins),
- "enclv", []>, PS;
+ "enclv", []>, TB, PS;
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrSNP.td b/llvm/lib/Target/X86/X86InstrSNP.td
index ab13fa43c92ddb..05ed6585db6dfe 100644
--- a/llvm/lib/Target/X86/X86InstrSNP.td
+++ b/llvm/lib/Target/X86/X86InstrSNP.td
@@ -17,31 +17,31 @@
let SchedRW = [WriteSystem] in {
// F3 0F 01 FF
let Uses = [RAX], Defs = [EAX, EFLAGS] in
-def PSMASH: I<0x01, MRM_FF, (outs), (ins), "psmash", []>, XS,
+def PSMASH: I<0x01, MRM_FF, (outs), (ins), "psmash", []>, TB, XS,
Requires<[In64BitMode]>;
// F2 0F 01 FF
let Uses = [RAX, RCX, RDX], Defs = [EAX, EFLAGS] in
def PVALIDATE64: I<0x01, MRM_FF, (outs), (ins), "pvalidate",[]>,
- XD, Requires<[In64BitMode]>;
+ TB, XD, Requires<[In64BitMode]>;
let Uses = [EAX, ECX, EDX], Defs = [EAX, EFLAGS] in
def PVALIDATE32: I<0x01, MRM_FF, (outs), (ins), "pvalidate",[]>,
- XD, Requires<[Not64BitMode]>;
+ TB, XD, Requires<[Not64BitMode]>;
// F2 0F 01 FE
let Uses = [RAX, RCX], Defs = [EAX, EFLAGS] in
-def RMPUPDATE: I<0x01, MRM_FE, (outs), (ins), "rmpupdate", []>, XD,
+def RMPUPDATE: I<0x01, MRM_FE, (outs), (ins), "rmpupdate", []>, TB, XD,
Requires<[In64BitMode]>;
// F3 0F 01 FE
let Uses = [RAX, RCX, RDX], Defs = [EAX, EFLAGS] in
-def RMPADJUST: I<0x01, MRM_FE, (outs), (ins), "rmpadjust", []>, XS,
+def RMPADJUST: I<0x01, MRM_FE, (outs), (ins), "rmpadjust", []>, TB, XS,
Requires<[In64BitMode]>;
// F3 0F 01 FD
let Uses = [RAX, RDX], Defs = [RAX, RCX, RDX, EFLAGS] in
-def RMPQUERY: I<0x01, MRM_FD, (outs), (ins), "rmpquery", []>, XS,
+def RMPQUERY: I<0x01, MRM_FD, (outs), (ins), "rmpquery", []>, TB, XS,
Requires<[In64BitMode]>;
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index d91c7740aae39b..27d3974a674ab6 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -268,15 +268,15 @@ multiclass sse12_move_rm<RegisterClass RC, ValueType vt, X86MemOperand x86memop,
}
defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
- SSEPackedSingle, UseSSE1>, XS;
+ SSEPackedSingle, UseSSE1>, TB, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd",
- SSEPackedDouble, UseSSE2>, XD;
+ SSEPackedDouble, UseSSE2>, TB, XD;
let canFoldAsLoad = 1, isReMaterializable = 1 in {
defm MOVSS : sse12_move_rm<FR32, v4f32, f32mem, loadf32, X86vzload32, "movss",
- SSEPackedSingle>, XS;
+ SSEPackedSingle>, TB, XS;
defm MOVSD : sse12_move_rm<FR64, v2f64, f64mem, loadf64, X86vzload64, "movsd",
- SSEPackedDouble>, XD;
+ SSEPackedDouble>, TB, XD;
}
// Patterns
@@ -352,46 +352,46 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in
let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- PS, VEX, WIG;
+ TB, PS, VEX, WIG;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
- PD, VEX, WIG;
+ TB, PD, VEX, WIG;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- PS, VEX, WIG;
+ TB, PS, VEX, WIG;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
- PD, VEX, WIG;
+ TB, PD, VEX, WIG;
defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.YMM>,
- PS, VEX, VEX_L, WIG;
+ TB, PS, VEX, VEX_L, WIG;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64, "movapd",
SSEPackedDouble, SchedWriteFMoveLS.YMM>,
- PD, VEX, VEX_L, WIG;
+ TB, PD, VEX, VEX_L, WIG;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.YMM>,
- PS, VEX, VEX_L, WIG;
+ TB, PS, VEX, VEX_L, WIG;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64, "movupd",
SSEPackedDouble, SchedWriteFMoveLS.YMM>,
- PD, VEX, VEX_L, WIG;
+ TB, PD, VEX, VEX_L, WIG;
}
let Predicates = [UseSSE1] in {
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- PS;
+ TB, PS;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- PS;
+ TB, PS;
}
let Predicates = [UseSSE2] in {
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
- PD;
+ TB, PD;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
- PD;
+ TB, PD;
}
let Predicates = [HasAVX, NoVLX] in {
@@ -666,7 +666,7 @@ multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDPatternOperator pdnode,
def PSrm : PI<opc, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
!strconcat(base_opc, "s", asm_opr),
- [], SSEPackedSingle>, PS,
+ [], SSEPackedSingle>, TB, PS,
Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
def PDrm : PI<opc, MRMSrcMem,
@@ -674,7 +674,7 @@ multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDPatternOperator pdnode,
!strconcat(base_opc, "d", asm_opr),
[(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)))))],
- SSEPackedDouble>, PD,
+ SSEPackedDouble>, TB, PD,
Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
}
@@ -903,36 +903,36 @@ let isCodeGenOnly = 1, Predicates = [UseAVX], Uses = [MXCSR], mayRaiseFPExceptio
defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, any_fp_to_sint, f32mem, loadf32,
"cvttss2si", "cvttss2si",
WriteCvtSS2I, SSEPackedSingle>,
- XS, VEX, VEX_LIG;
+ TB, XS, VEX, VEX_LIG;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, any_fp_to_sint, f32mem, loadf32,
"cvttss2si", "cvttss2si",
WriteCvtSS2I, SSEPackedSingle>,
- XS, VEX, REX_W, VEX_LIG;
+ TB, XS, VEX, REX_W, VEX_LIG;
defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, any_fp_to_sint, f64mem, loadf64,
"cvttsd2si", "cvttsd2si",
WriteCvtSD2I, SSEPackedDouble>,
- XD, VEX, VEX_LIG;
+ TB, XD, VEX, VEX_LIG;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, any_fp_to_sint, f64mem, loadf64,
"cvttsd2si", "cvttsd2si",
WriteCvtSD2I, SSEPackedDouble>,
- XD, VEX, REX_W, VEX_LIG;
+ TB, XD, VEX, REX_W, VEX_LIG;
defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, lrint, f32mem, loadf32,
"cvtss2si", "cvtss2si",
WriteCvtSS2I, SSEPackedSingle>,
- XS, VEX, VEX_LIG;
+ TB, XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, llrint, f32mem, loadf32,
"cvtss2si", "cvtss2si",
WriteCvtSS2I, SSEPackedSingle>,
- XS, VEX, REX_W, VEX_LIG;
+ TB, XS, VEX, REX_W, VEX_LIG;
defm VCVTSD2SI : sse12_cvt_s<0x2D, FR64, GR32, lrint, f64mem, loadf64,
"cvtsd2si", "cvtsd2si",
WriteCvtSD2I, SSEPackedDouble>,
- XD, VEX, VEX_LIG;
+ TB, XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, llrint, f64mem, loadf64,
"cvtsd2si", "cvtsd2si",
WriteCvtSD2I, SSEPackedDouble>,
- XD, VEX, REX_W, VEX_LIG;
+ TB, XD, VEX, REX_W, VEX_LIG;
}
// The assembler can recognize rr 64-bit instructions by seeing a rxx
@@ -941,16 +941,16 @@ defm VCVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, llrint, f64mem, loadf64,
// where appropriate to do so.
let isCodeGenOnly = 1 in {
defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss", "l",
- WriteCvtI2SS, SSEPackedSingle>, XS, VEX, VVVV,
+ WriteCvtI2SS, SSEPackedSingle>, TB, XS, VEX, VVVV,
VEX_LIG, SIMD_EXC;
defm VCVTSI642SS : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss", "q",
- WriteCvtI2SS, SSEPackedSingle>, XS, VEX, VVVV,
+ WriteCvtI2SS, SSEPackedSingle>, TB, XS, VEX, VVVV,
REX_W, VEX_LIG, SIMD_EXC;
defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd", "l",
- WriteCvtI2SD, SSEPackedDouble>, XD, VEX, VVVV,
+ WriteCvtI2SD, SSEPackedDouble>, TB, XD, VEX, VVVV,
VEX_LIG;
defm VCVTSI642SD : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd", "q",
- WriteCvtI2SD, SSEPackedDouble>, XD, VEX, VVVV,
+ WriteCvtI2SD, SSEPackedDouble>, TB, XD, VEX, VVVV,
REX_W, VEX_LIG, SIMD_EXC;
} // isCodeGenOnly = 1
@@ -983,42 +983,42 @@ let Predicates = [UseAVX] in {
let isCodeGenOnly = 1 in {
defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, any_fp_to_sint, f32mem, loadf32,
"cvttss2si", "cvttss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, SIMD_EXC;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, SIMD_EXC;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, any_fp_to_sint, f32mem, loadf32,
"cvttss2si", "cvttss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, REX_W, SIMD_EXC;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, REX_W, SIMD_EXC;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, any_fp_to_sint, f64mem, loadf64,
"cvttsd2si", "cvttsd2si",
- WriteCvtSD2I, SSEPackedDouble>, XD, SIMD_EXC;
+ WriteCvtSD2I, SSEPackedDouble>, TB, XD, SIMD_EXC;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, any_fp_to_sint, f64mem, loadf64,
"cvttsd2si", "cvttsd2si",
- WriteCvtSD2I, SSEPackedDouble>, XD, REX_W, SIMD_EXC;
+ WriteCvtSD2I, SSEPackedDouble>, TB, XD, REX_W, SIMD_EXC;
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, lrint, f32mem, loadf32,
"cvtss2si", "cvtss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, SIMD_EXC;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, SIMD_EXC;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, llrint, f32mem, loadf32,
"cvtss2si", "cvtss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, REX_W, SIMD_EXC;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, REX_W, SIMD_EXC;
defm CVTSD2SI : sse12_cvt_s<0x2D, FR64, GR32, lrint, f64mem, loadf64,
"cvtsd2si", "cvtsd2si",
- WriteCvtSD2I, SSEPackedDouble>, XD, SIMD_EXC;
+ WriteCvtSD2I, SSEPackedDouble>, TB, XD, SIMD_EXC;
defm CVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, llrint, f64mem, loadf64,
"cvtsd2si", "cvtsd2si",
- WriteCvtSD2I, SSEPackedDouble>, XD, REX_W, SIMD_EXC;
+ WriteCvtSD2I, SSEPackedDouble>, TB, XD, REX_W, SIMD_EXC;
defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, any_sint_to_fp, i32mem, loadi32,
"cvtsi2ss", "cvtsi2ss{l}",
- WriteCvtI2SS, SSEPackedSingle, ReadInt2Fpu>, XS, SIMD_EXC;
+ WriteCvtI2SS, SSEPackedSingle, ReadInt2Fpu>, TB, XS, SIMD_EXC;
defm CVTSI642SS : sse12_cvt_s<0x2A, GR64, FR32, any_sint_to_fp, i64mem, loadi64,
"cvtsi2ss", "cvtsi2ss{q}",
- WriteCvtI2SS, SSEPackedSingle, ReadInt2Fpu>, XS, REX_W, SIMD_EXC;
+ WriteCvtI2SS, SSEPackedSingle, ReadInt2Fpu>, TB, XS, REX_W, SIMD_EXC;
defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, any_sint_to_fp, i32mem, loadi32,
"cvtsi2sd", "cvtsi2sd{l}",
- WriteCvtI2SD, SSEPackedDouble, ReadInt2Fpu>, XD;
+ WriteCvtI2SD, SSEPackedDouble, ReadInt2Fpu>, TB, XD;
defm CVTSI642SD : sse12_cvt_s<0x2A, GR64, FR64, any_sint_to_fp, i64mem, loadi64,
"cvtsi2sd", "cvtsi2sd{q}",
- WriteCvtI2SD, SSEPackedDouble, ReadInt2Fpu>, XD, REX_W, SIMD_EXC;
+ WriteCvtI2SD, SSEPackedDouble, ReadInt2Fpu>, TB, XD, REX_W, SIMD_EXC;
} // isCodeGenOnly = 1
let Predicates = [UseSSE1] in {
@@ -1074,46 +1074,46 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Predicates = [UseAVX] in {
defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v2f64,
X86cvts2si, sdmem, sse_load_f64, "cvtsd2si",
- WriteCvtSD2I, SSEPackedDouble>, XD, VEX, VEX_LIG;
+ WriteCvtSD2I, SSEPackedDouble>, TB, XD, VEX, VEX_LIG;
defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v2f64,
X86cvts2si, sdmem, sse_load_f64, "cvtsd2si",
- WriteCvtSD2I, SSEPackedDouble>, XD, VEX, REX_W, VEX_LIG;
+ WriteCvtSD2I, SSEPackedDouble>, TB, XD, VEX, REX_W, VEX_LIG;
}
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v2f64, X86cvts2si,
sdmem, sse_load_f64, "cvtsd2si", WriteCvtSD2I,
- SSEPackedDouble>, XD;
+ SSEPackedDouble>, TB, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v2f64, X86cvts2si,
sdmem, sse_load_f64, "cvtsd2si", WriteCvtSD2I,
- SSEPackedDouble>, XD, REX_W;
+ SSEPackedDouble>, TB, XD, REX_W;
}
let Predicates = [UseAVX] in {
defm VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
i32mem, "cvtsi2ss", "l", WriteCvtI2SS, SSEPackedSingle, 0>,
- XS, VEX, VVVV, VEX_LIG, SIMD_EXC;
+ TB, XS, VEX, VVVV, VEX_LIG, SIMD_EXC;
defm VCVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
i64mem, "cvtsi2ss", "q", WriteCvtI2SS, SSEPackedSingle, 0>,
- XS, VEX, VVVV, VEX_LIG, REX_W, SIMD_EXC;
+ TB, XS, VEX, VVVV, VEX_LIG, REX_W, SIMD_EXC;
defm VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
i32mem, "cvtsi2sd", "l", WriteCvtI2SD, SSEPackedDouble, 0>,
- XD, VEX, VVVV, VEX_LIG;
+ TB, XD, VEX, VVVV, VEX_LIG;
defm VCVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
i64mem, "cvtsi2sd", "q", WriteCvtI2SD, SSEPackedDouble, 0>,
- XD, VEX, VVVV, VEX_LIG, REX_W, SIMD_EXC;
+ TB, XD, VEX, VVVV, VEX_LIG, REX_W, SIMD_EXC;
}
let Constraints = "$src1 = $dst" in {
defm CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
i32mem, "cvtsi2ss", "l", WriteCvtI2SS, SSEPackedSingle>,
- XS, SIMD_EXC;
+ TB, XS, SIMD_EXC;
defm CVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
i64mem, "cvtsi2ss", "q", WriteCvtI2SS, SSEPackedSingle>,
- XS, REX_W, SIMD_EXC;
+ TB, XS, REX_W, SIMD_EXC;
defm CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
i32mem, "cvtsi2sd", "l", WriteCvtI2SD, SSEPackedDouble>,
- XD;
+ TB, XD;
defm CVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
i64mem, "cvtsi2sd", "q", WriteCvtI2SD, SSEPackedDouble>,
- XD, REX_W, SIMD_EXC;
+ TB, XD, REX_W, SIMD_EXC;
}
def : InstAlias<"vcvtsi2ss{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1150,34 +1150,34 @@ def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
let Predicates = [UseAVX], Uses = [MXCSR], mayRaiseFPException = 1 in {
defm VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v4f32, X86cvtts2Int,
ssmem, sse_load_f32, "cvttss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, VEX, VEX_LIG;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, VEX, VEX_LIG;
defm VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v4f32,
X86cvtts2Int, ssmem, sse_load_f32,
"cvttss2si", WriteCvtSS2I, SSEPackedSingle>,
- XS, VEX, VEX_LIG, REX_W;
+ TB, XS, VEX, VEX_LIG, REX_W;
defm VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v2f64, X86cvtts2Int,
sdmem, sse_load_f64, "cvttsd2si",
- WriteCvtSS2I, SSEPackedDouble>, XD, VEX, VEX_LIG;
+ WriteCvtSS2I, SSEPackedDouble>, TB, XD, VEX, VEX_LIG;
defm VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v2f64,
X86cvtts2Int, sdmem, sse_load_f64,
"cvttsd2si", WriteCvtSS2I, SSEPackedDouble>,
- XD, VEX, VEX_LIG, REX_W;
+ TB, XD, VEX, VEX_LIG, REX_W;
}
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v4f32, X86cvtts2Int,
ssmem, sse_load_f32, "cvttss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS;
defm CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v4f32,
X86cvtts2Int, ssmem, sse_load_f32,
"cvttss2si", WriteCvtSS2I, SSEPackedSingle>,
- XS, REX_W;
+ TB, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, i32, v2f64, X86cvtts2Int,
sdmem, sse_load_f64, "cvttsd2si",
- WriteCvtSD2I, SSEPackedDouble>, XD;
+ WriteCvtSD2I, SSEPackedDouble>, TB, XD;
defm CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64, i64, v2f64,
X86cvtts2Int, sdmem, sse_load_f64,
"cvttsd2si", WriteCvtSD2I, SSEPackedDouble>,
- XD, REX_W;
+ TB, XD, REX_W;
}
def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
@@ -1217,32 +1217,32 @@ def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
let Predicates = [UseAVX], Uses = [MXCSR], mayRaiseFPException = 1 in {
defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v4f32, X86cvts2si,
ssmem, sse_load_f32, "cvtss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, VEX, VEX_LIG;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, VEX, VEX_LIG;
defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
ssmem, sse_load_f32, "cvtss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, VEX, REX_W, VEX_LIG;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, VEX, REX_W, VEX_LIG;
}
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, i32, v4f32, X86cvts2si,
ssmem, sse_load_f32, "cvtss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS;
defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
ssmem, sse_load_f32, "cvtss2si",
- WriteCvtSS2I, SSEPackedSingle>, XS, REX_W;
+ WriteCvtSS2I, SSEPackedSingle>, TB, XS, REX_W;
defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, load,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PS>,
- PS, VEX, Requires<[HasAVX, NoVLX]>, WIG;
+ TB, PS, VEX, Requires<[HasAVX, NoVLX]>, WIG;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, i256mem, v8f32, v8i32, load,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PSY>,
- PS, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, WIG;
+ TB, PS, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, WIG;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, memop,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PS>,
- PS, Requires<[UseSSE2]>;
+ TB, PS, Requires<[UseSSE2]>;
}
// AVX aliases
@@ -1295,7 +1295,7 @@ let mayLoad = 1 in
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f64mem:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- XD, VEX, VVVV, VEX_LIG, WIG,
+ TB, XD, VEX, VVVV, VEX_LIG, WIG,
Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>, SIMD_EXC;
}
@@ -1311,7 +1311,7 @@ def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (any_fpround (loadf64 addr:$src)))]>,
- XD, Requires<[UseSSE2, OptForSize]>,
+ TB, XD, Requires<[UseSSE2, OptForSize]>,
Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>, SIMD_EXC;
}
@@ -1321,14 +1321,14 @@ def VCVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(v4f32 (X86frounds VR128:$src1, (v2f64 VR128:$src2))))]>,
- XD, VEX, VVVV, VEX_LIG, WIG, Requires<[UseAVX]>,
+ TB, XD, VEX, VVVV, VEX_LIG, WIG, Requires<[UseAVX]>,
Sched<[WriteCvtSD2SS]>;
def VCVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(v4f32 (X86frounds VR128:$src1, (sse_load_f64 addr:$src2))))]>,
- XD, VEX, VVVV, VEX_LIG, WIG, Requires<[UseAVX]>,
+ TB, XD, VEX, VVVV, VEX_LIG, WIG, Requires<[UseAVX]>,
Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
let Constraints = "$src1 = $dst" in {
def CVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
@@ -1336,13 +1336,13 @@ def CVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
"cvtsd2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (X86frounds VR128:$src1, (v2f64 VR128:$src2))))]>,
- XD, Requires<[UseSSE2]>, Sched<[WriteCvtSD2SS]>;
+ TB, XD, Requires<[UseSSE2]>, Sched<[WriteCvtSD2SS]>;
def CVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
"cvtsd2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
(v4f32 (X86frounds VR128:$src1, (sse_load_f64 addr:$src2))))]>,
- XD, Requires<[UseSSE2]>,
+ TB, XD, Requires<[UseSSE2]>,
Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
}
}
@@ -1353,13 +1353,13 @@ let isCodeGenOnly = 1, hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
(ins FR64:$src1, FR32:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- XS, VEX, VVVV, VEX_LIG, WIG,
+ TB, XS, VEX, VVVV, VEX_LIG, WIG,
Sched<[WriteCvtSS2SD]>, Requires<[UseAVX]>, SIMD_EXC;
let mayLoad = 1 in
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
(ins FR64:$src1, f32mem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- XS, VEX, VVVV, VEX_LIG, WIG,
+ TB, XS, VEX, VVVV, VEX_LIG, WIG,
Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>,
Requires<[UseAVX, OptForSize]>, SIMD_EXC;
} // isCodeGenOnly = 1, hasSideEffects = 0
@@ -1373,11 +1373,11 @@ let isCodeGenOnly = 1, ExeDomain = SSEPackedSingle in {
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (any_fpextend FR32:$src))]>,
- XS, Requires<[UseSSE2]>, Sched<[WriteCvtSS2SD]>, SIMD_EXC;
+ TB, XS, Requires<[UseSSE2]>, Sched<[WriteCvtSS2SD]>, SIMD_EXC;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (any_fpextend (loadf32 addr:$src)))]>,
- XS, Requires<[UseSSE2, OptForSize]>,
+ TB, XS, Requires<[UseSSE2, OptForSize]>,
Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>, SIMD_EXC;
} // isCodeGenOnly = 1
@@ -1386,25 +1386,25 @@ let hasSideEffects = 0, Uses = [MXCSR], mayRaiseFPException = 1,
def VCVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XS, VEX, VVVV, VEX_LIG, WIG,
+ []>, TB, XS, VEX, VVVV, VEX_LIG, WIG,
Requires<[HasAVX]>, Sched<[WriteCvtSS2SD]>;
let mayLoad = 1 in
def VCVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XS, VEX, VVVV, VEX_LIG, WIG, Requires<[HasAVX]>,
+ []>, TB, XS, VEX, VVVV, VEX_LIG, WIG, Requires<[HasAVX]>,
Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def CVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
- []>, XS, Requires<[UseSSE2]>,
+ []>, TB, XS, Requires<[UseSSE2]>,
Sched<[WriteCvtSS2SD]>;
let mayLoad = 1 in
def CVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
- []>, XS, Requires<[UseSSE2]>,
+ []>, TB, XS, Requires<[UseSSE2]>,
Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>;
}
} // hasSideEffects = 0
@@ -1699,30 +1699,30 @@ let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (X86any_vfpext (v4f32 VR128:$src))))]>,
- PS, VEX, Sched<[WriteCvtPS2PD]>, WIG;
+ TB, PS, VEX, Sched<[WriteCvtPS2PD]>, WIG;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
- PS, VEX, Sched<[WriteCvtPS2PD.Folded]>, WIG;
+ TB, PS, VEX, Sched<[WriteCvtPS2PD.Folded]>, WIG;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (v4f64 (any_fpextend (v4f32 VR128:$src))))]>,
- PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, WIG;
+ TB, PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, WIG;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (v4f64 (extloadv4f32 addr:$src)))]>,
- PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, WIG;
+ TB, PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, WIG;
}
let Predicates = [UseSSE2], Uses = [MXCSR], mayRaiseFPException = 1 in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (X86any_vfpext (v4f32 VR128:$src))))]>,
- PS, Sched<[WriteCvtPS2PD]>;
+ TB, PS, Sched<[WriteCvtPS2PD]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
- PS, Sched<[WriteCvtPS2PD.Folded]>;
+ TB, PS, Sched<[WriteCvtPS2PD.Folded]>;
}
// Convert Packed DW Integers to Packed Double FP
@@ -1860,22 +1860,22 @@ let ExeDomain = SSEPackedSingle in
defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, ssmem, X86cmps, v4f32, loadf32,
"cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SchedWriteFCmpSizes.PS.Scl, sse_load_f32>,
- XS, VEX, VVVV, VEX_LIG, WIG;
+ TB, XS, VEX, VVVV, VEX_LIG, WIG;
let ExeDomain = SSEPackedDouble in
defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, sdmem, X86cmps, v2f64, loadf64,
"cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SchedWriteFCmpSizes.PD.Scl, sse_load_f64>,
- XD, VEX, VVVV, VEX_LIG, WIG;
+ TB, XD, VEX, VVVV, VEX_LIG, WIG;
let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedSingle in
defm CMPSS : sse12_cmp_scalar<FR32, f32mem, ssmem, X86cmps, v4f32, loadf32,
"cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SchedWriteFCmpSizes.PS.Scl, sse_load_f32>, XS;
+ SchedWriteFCmpSizes.PS.Scl, sse_load_f32>, TB, XS;
let ExeDomain = SSEPackedDouble in
defm CMPSD : sse12_cmp_scalar<FR64, f64mem, sdmem, X86cmps, v2f64, loadf64,
"cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SchedWriteFCmpSizes.PD.Scl, sse_load_f64>, XD;
+ SchedWriteFCmpSizes.PD.Scl, sse_load_f64>, TB, XD;
}
// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
@@ -1919,44 +1919,44 @@ let mayLoad = 1 in
let Defs = [EFLAGS] in {
defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
+ "ucomiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86any_fcmp, f64, f64mem, loadf64,
- "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
+ "ucomisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
defm VCOMISS : sse12_ord_cmp<0x2F, FR32, X86strict_fcmps, f32, f32mem, loadf32,
- "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
+ "comiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
defm VCOMISD : sse12_ord_cmp<0x2F, FR64, X86strict_fcmps, f64, f64mem, loadf64,
- "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
+ "comisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
let isCodeGenOnly = 1 in {
defm VUCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
- sse_load_f32, "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
+ sse_load_f32, "ucomiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
defm VUCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
- sse_load_f64, "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
+ sse_load_f64, "ucomisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
defm VCOMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
- sse_load_f32, "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
+ sse_load_f32, "comiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
defm VCOMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
- sse_load_f64, "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
+ sse_load_f64, "comisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
}
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, PS;
+ "ucomiss", SSEPackedSingle>, TB, PS;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86any_fcmp, f64, f64mem, loadf64,
- "ucomisd", SSEPackedDouble>, PD;
+ "ucomisd", SSEPackedDouble>, TB, PD;
defm COMISS : sse12_ord_cmp<0x2F, FR32, X86strict_fcmps, f32, f32mem, loadf32,
- "comiss", SSEPackedSingle>, PS;
+ "comiss", SSEPackedSingle>, TB, PS;
defm COMISD : sse12_ord_cmp<0x2F, FR64, X86strict_fcmps, f64, f64mem, loadf64,
- "comisd", SSEPackedDouble>, PD;
+ "comisd", SSEPackedDouble>, TB, PD;
let isCodeGenOnly = 1 in {
defm UCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
- sse_load_f32, "ucomiss", SSEPackedSingle>, PS;
+ sse_load_f32, "ucomiss", SSEPackedSingle>, TB, PS;
defm UCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
- sse_load_f64, "ucomisd", SSEPackedDouble>, PD;
+ sse_load_f64, "ucomisd", SSEPackedDouble>, TB, PD;
defm COMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
- sse_load_f32, "comiss", SSEPackedSingle>, PS;
+ sse_load_f32, "comiss", SSEPackedSingle>, TB, PS;
defm COMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
- sse_load_f64, "comisd", SSEPackedDouble>, PD;
+ sse_load_f64, "comisd", SSEPackedDouble>, TB, PD;
}
} // Defs = [EFLAGS]
@@ -1979,23 +1979,23 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
defm VCMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, PS, VEX, VVVV, WIG;
+ SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, TB, PS, VEX, VVVV, WIG;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, v2f64,
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, loadv2f64>, PD, VEX, VVVV, WIG;
+ SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, loadv2f64>, TB, PD, VEX, VVVV, WIG;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, v8f32,
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, PS, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, TB, PS, VEX, VVVV, VEX_L, WIG;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, v4f64,
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PD.YMM, SSEPackedDouble, loadv4f64>, PD, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFCmpSizes.PD.YMM, SSEPackedDouble, loadv4f64>, TB, PD, VEX, VVVV, VEX_L, WIG;
let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, memopv4f32>, PS;
+ SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, memopv4f32>, TB, PS;
defm CMPPD : sse12_cmp_packed<VR128, f128mem, v2f64,
"cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, memopv2f64>, PD;
+ SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, memopv2f64>, TB, PD;
}
def CommutableCMPCC : PatLeaf<(timm), [{
@@ -2076,27 +2076,27 @@ let Predicates = [HasAVX, NoVLX] in {
defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>,
- PS, VEX, VVVV, WIG;
+ TB, PS, VEX, VVVV, WIG;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv8f32, SchedWriteFShuffle.YMM, SSEPackedSingle>,
- PS, VEX, VVVV, VEX_L, WIG;
+ TB, PS, VEX, VVVV, VEX_L, WIG;
defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble>,
- PD, VEX, VVVV, WIG;
+ TB, PD, VEX, VVVV, WIG;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv4f64, SchedWriteFShuffle.YMM, SSEPackedDouble>,
- PD, VEX, VVVV, VEX_L, WIG;
+ TB, PD, VEX, VVVV, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- memopv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
+ memopv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS;
defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- memopv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD;
+ memopv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, TB, PD;
}
//===----------------------------------------------------------------------===//
@@ -2126,44 +2126,44 @@ multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
let Predicates = [HasAVX, NoVLX] in {
defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, load,
VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX, VVVV, WIG;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS, VEX, VVVV, WIG;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, load,
VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD, VEX, VVVV, WIG;
+ SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, TB, PD, VEX, VVVV, WIG;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, load,
VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX, VVVV, WIG;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS, VEX, VVVV, WIG;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, load,
VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedDouble>, PD, VEX, VVVV, WIG;
+ SchedWriteFShuffle.XMM, SSEPackedDouble>, TB, PD, VEX, VVVV, WIG;
defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, load,
VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFShuffle.YMM, SSEPackedSingle>, TB, PS, VEX, VVVV, VEX_L, WIG;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, load,
VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFShuffle.YMM, SSEPackedDouble>, TB, PD, VEX, VVVV, VEX_L, WIG;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, load,
VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFShuffle.YMM, SSEPackedSingle>, TB, PS, VEX, VVVV, VEX_L, WIG;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, load,
VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFShuffle.YMM, SSEPackedDouble>, TB, PD, VEX, VVVV, VEX_L, WIG;
}// Predicates = [HasAVX, NoVLX]
let Constraints = "$src1 = $dst" in {
defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memop,
VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS;
defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memop,
VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD;
+ SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, TB, PD;
defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memop,
VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, PS;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS;
defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memop,
VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedDouble>, PD;
+ SchedWriteFShuffle.XMM, SSEPackedDouble>, TB, PD;
} // Constraints = "$src1 = $dst"
let Predicates = [HasAVX1Only] in {
@@ -2208,13 +2208,13 @@ multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
let Predicates = [HasAVX] in {
defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
- SSEPackedSingle>, PS, VEX, WIG;
+ SSEPackedSingle>, TB, PS, VEX, WIG;
defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
- SSEPackedDouble>, PD, VEX, WIG;
+ SSEPackedDouble>, TB, PD, VEX, WIG;
defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
- SSEPackedSingle>, PS, VEX, VEX_L, WIG;
+ SSEPackedSingle>, TB, PS, VEX, VEX_L, WIG;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
- SSEPackedDouble>, PD, VEX, VEX_L, WIG;
+ SSEPackedDouble>, TB, PD, VEX, VEX_L, WIG;
// Also support integer VTs to avoid a int->fp bitcast in the DAG.
def : Pat<(X86movmsk (v4i32 VR128:$src)),
@@ -2228,9 +2228,9 @@ let Predicates = [HasAVX] in {
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
- SSEPackedSingle>, PS;
+ SSEPackedSingle>, TB, PS;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
- SSEPackedDouble>, PD;
+ SSEPackedDouble>, TB, PD;
let Predicates = [UseSSE2] in {
// Also support integer VTs to avoid a int->fp bitcast in the DAG.
@@ -2312,29 +2312,29 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f256mem, sched.YMM,
- [], [], 0>, PS, VEX, VVVV, VEX_L, WIG;
+ [], [], 0>, TB, PS, VEX, VVVV, VEX_L, WIG;
defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f256mem, sched.YMM,
- [], [], 0>, PD, VEX, VVVV, VEX_L, WIG;
+ [], [], 0>, TB, PD, VEX, VVVV, VEX_L, WIG;
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
- [], [], 0>, PS, VEX, VVVV, WIG;
+ [], [], 0>, TB, PS, VEX, VVVV, WIG;
defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
- [], [], 0>, PD, VEX, VVVV, WIG;
+ [], [], 0>, TB, PD, VEX, VVVV, WIG;
}
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
- [], []>, PS;
+ [], []>, TB, PS;
defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
- [], []>, PD;
+ [], []>, TB, PD;
}
}
@@ -2636,26 +2636,26 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
VR128, v4f32, f128mem, loadv4f32,
- SSEPackedSingle, sched.PS.XMM, 0>, PS, VEX, VVVV, WIG;
+ SSEPackedSingle, sched.PS.XMM, 0>, TB, PS, VEX, VVVV, WIG;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
VR128, v2f64, f128mem, loadv2f64,
- SSEPackedDouble, sched.PD.XMM, 0>, PD, VEX, VVVV, WIG;
+ SSEPackedDouble, sched.PD.XMM, 0>, TB, PD, VEX, VVVV, WIG;
defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
OpNode, VR256, v8f32, f256mem, loadv8f32,
- SSEPackedSingle, sched.PS.YMM, 0>, PS, VEX, VVVV, VEX_L, WIG;
+ SSEPackedSingle, sched.PS.YMM, 0>, TB, PS, VEX, VVVV, VEX_L, WIG;
defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
OpNode, VR256, v4f64, f256mem, loadv4f64,
- SSEPackedDouble, sched.PD.YMM, 0>, PD, VEX, VVVV, VEX_L, WIG;
+ SSEPackedDouble, sched.PD.YMM, 0>, TB, PD, VEX, VVVV, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
v4f32, f128mem, memopv4f32, SSEPackedSingle,
- sched.PS.XMM>, PS;
+ sched.PS.XMM>, TB, PS;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
v2f64, f128mem, memopv2f64, SSEPackedDouble,
- sched.PD.XMM>, PD;
+ sched.PD.XMM>, TB, PD;
}
}
}
@@ -2665,18 +2665,18 @@ multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDPatternOperat
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
OpNode, FR32, f32mem, SSEPackedSingle, sched.PS.Scl, 0>,
- XS, VEX, VVVV, VEX_LIG, WIG;
+ TB, XS, VEX, VVVV, VEX_LIG, WIG;
defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
OpNode, FR64, f64mem, SSEPackedDouble, sched.PD.Scl, 0>,
- XD, VEX, VVVV, VEX_LIG, WIG;
+ TB, XD, VEX, VVVV, VEX_LIG, WIG;
let Constraints = "$src1 = $dst" in {
defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
OpNode, FR32, f32mem, SSEPackedSingle,
- sched.PS.Scl>, XS;
+ sched.PS.Scl>, TB, XS;
defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
OpNode, FR64, f64mem, SSEPackedDouble,
- sched.PD.Scl>, XD;
+ sched.PD.Scl>, TB, XD;
}
}
}
@@ -2687,18 +2687,18 @@ multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm V#NAME#SS : sse12_fp_scalar_int<opc, OpNode, VR128, v4f32,
!strconcat(OpcodeStr, "ss"), ssmem, sse_load_f32,
- SSEPackedSingle, sched.PS.Scl, 0>, XS, VEX, VVVV, VEX_LIG, WIG;
+ SSEPackedSingle, sched.PS.Scl, 0>, TB, XS, VEX, VVVV, VEX_LIG, WIG;
defm V#NAME#SD : sse12_fp_scalar_int<opc, OpNode, VR128, v2f64,
!strconcat(OpcodeStr, "sd"), sdmem, sse_load_f64,
- SSEPackedDouble, sched.PD.Scl, 0>, XD, VEX, VVVV, VEX_LIG, WIG;
+ SSEPackedDouble, sched.PD.Scl, 0>, TB, XD, VEX, VVVV, VEX_LIG, WIG;
let Constraints = "$src1 = $dst" in {
defm SS : sse12_fp_scalar_int<opc, OpNode, VR128, v4f32,
!strconcat(OpcodeStr, "ss"), ssmem, sse_load_f32,
- SSEPackedSingle, sched.PS.Scl>, XS;
+ SSEPackedSingle, sched.PS.Scl>, TB, XS;
defm SD : sse12_fp_scalar_int<opc, OpNode, VR128, v2f64,
!strconcat(OpcodeStr, "sd"), sdmem, sse_load_f64,
- SSEPackedDouble, sched.PD.Scl>, XD;
+ SSEPackedDouble, sched.PD.Scl>, TB, XD;
}
}
}
@@ -3016,29 +3016,29 @@ let Predicates = [HasAVX, NoVLX] in {
multiclass sse1_fp_unop_s_intr<string OpcodeStr, Predicate AVXTarget> {
defm SS : sse_fp_unop_s_intr<v4f32, sse_load_f32,
!cast<Intrinsic>("int_x86_sse_"#OpcodeStr#_ss),
- UseSSE1>, XS;
+ UseSSE1>, TB, XS;
defm V#NAME#SS : avx_fp_unop_s_intr<v4f32, sse_load_f32,
!cast<Intrinsic>("int_x86_sse_"#OpcodeStr#_ss),
AVXTarget>,
- XS, VEX, VVVV, VEX_LIG, WIG;
+ TB, XS, VEX, VVVV, VEX_LIG, WIG;
}
multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
X86SchedWriteWidths sched, Predicate AVXTarget> {
defm SS : sse_fp_unop_s<opc, OpcodeStr#ss, FR32, f32mem,
- ssmem, OpNode, SSEPackedSingle, sched.Scl, UseSSE1>, XS;
+ ssmem, OpNode, SSEPackedSingle, sched.Scl, UseSSE1>, TB, XS;
defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr#ss, FR32, f32,
f32mem, ssmem, OpNode, SSEPackedSingle, sched.Scl, AVXTarget>,
- XS, VEX, VVVV, VEX_LIG, WIG;
+ TB, XS, VEX, VVVV, VEX_LIG, WIG;
}
multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
X86SchedWriteWidths sched, Predicate AVXTarget> {
defm SD : sse_fp_unop_s<opc, OpcodeStr#sd, FR64, f64mem,
- sdmem, OpNode, SSEPackedDouble, sched.Scl, UseSSE2>, XD;
+ sdmem, OpNode, SSEPackedDouble, sched.Scl, UseSSE2>, TB, XD;
defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr#sd, FR64, f64,
f64mem, sdmem, OpNode, SSEPackedDouble, sched.Scl, AVXTarget>,
- XD, VEX, VVVV, VEX_LIG, WIG;
+ TB, XD, VEX, VVVV, VEX_LIG, WIG;
}
// Square root.
@@ -3165,11 +3165,11 @@ let SchedRW = [WriteStoreNT] in {
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movnti{l}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
- PS, Requires<[HasSSE2]>;
+ TB, PS, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movnti{q}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
- PS, Requires<[HasSSE2]>;
+ TB, PS, Requires<[HasSSE2]>;
} // SchedRW = [WriteStoreNT]
let Predicates = [HasAVX, NoVLX] in {
@@ -3226,14 +3226,14 @@ let SchedRW = [WriteLoad] in {
// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
"clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
- PS, Requires<[HasCLFLUSH]>;
+ TB, PS, Requires<[HasCLFLUSH]>;
}
let SchedRW = [WriteNop] in {
// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins),
- "pause", [(int_x86_sse2_pause)]>, OBXS;
+ "pause", [(int_x86_sse2_pause)]>, XS;
}
let SchedRW = [WriteFence] in {
@@ -3241,11 +3241,11 @@ let SchedRW = [WriteFence] in {
// TODO: As with mfence, we may want to ease the availability of sfence/lfence
// to include any 64-bit target.
def SFENCE : I<0xAE, MRM7X, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
- PS, Requires<[HasSSE1]>;
+ TB, PS, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM5X, (outs), (ins), "lfence", [(int_x86_sse2_lfence)]>,
- PS, Requires<[HasSSE2]>;
+ TB, PS, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM6X, (outs), (ins), "mfence", [(int_x86_sse2_mfence)]>,
- PS, Requires<[HasMFence]>;
+ TB, PS, Requires<[HasMFence]>;
} // SchedRW
def : Pat<(X86MFence), (MFENCE)>;
@@ -3266,11 +3266,11 @@ def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
let mayLoad=1, hasSideEffects=1, Defs=[MXCSR] in
def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
"ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>,
- PS, Sched<[WriteLDMXCSR]>;
+ TB, PS, Sched<[WriteLDMXCSR]>;
let mayStore=1, hasSideEffects=1, Uses=[MXCSR] in
def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
"stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>,
- PS, Sched<[WriteSTMXCSR]>;
+ TB, PS, Sched<[WriteSTMXCSR]>;
//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
@@ -3327,11 +3327,11 @@ def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (loadv2i64 addr:$src))]>,
Sched<[SchedWriteVecMoveLS.XMM.RM]>,
- XS, VEX, WIG;
+ TB, XS, VEX, WIG;
def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovdqu\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.YMM.RM]>,
- XS, VEX, VEX_L, WIG;
+ TB, XS, VEX, VEX_L, WIG;
}
let mayStore = 1, hasSideEffects = 0, Predicates = [HasAVX,NoVLX] in {
@@ -3347,10 +3347,10 @@ def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",
[(store (v2i64 VR128:$src), addr:$dst)]>,
- Sched<[SchedWriteVecMoveLS.XMM.MR]>, XS, VEX, WIG;
+ Sched<[SchedWriteVecMoveLS.XMM.MR]>, TB, XS, VEX, WIG;
def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",[]>,
- Sched<[SchedWriteVecMoveLS.YMM.MR]>, XS, VEX, VEX_L, WIG;
+ Sched<[SchedWriteVecMoveLS.YMM.MR]>, TB, XS, VEX, VEX_L, WIG;
}
let SchedRW = [SchedWriteVecMoveLS.XMM.RR] in {
@@ -3360,7 +3360,7 @@ def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
- XS, Requires<[UseSSE2]>;
+ TB, XS, Requires<[UseSSE2]>;
}
// For Disassembler
@@ -3370,7 +3370,7 @@ def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
- XS, Requires<[UseSSE2]>;
+ TB, XS, Requires<[UseSSE2]>;
}
} // SchedRW
@@ -3382,7 +3382,7 @@ def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
- XS, Requires<[UseSSE2]>;
+ TB, XS, Requires<[UseSSE2]>;
}
let mayStore = 1, hasSideEffects = 0,
@@ -3393,7 +3393,7 @@ def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
- XS, Requires<[UseSSE2]>;
+ TB, XS, Requires<[UseSSE2]>;
}
} // ExeDomain = SSEPackedInt
@@ -3757,11 +3757,11 @@ let Predicates = [UseSSE2] in {
} // ExeDomain = SSEPackedInt
defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd,
- SchedWriteShuffle, NoVLX>, PD;
+ SchedWriteShuffle, NoVLX>, TB, PD;
defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw,
- SchedWriteShuffle, NoVLX_Or_NoBWI>, XS;
+ SchedWriteShuffle, NoVLX_Or_NoBWI>, TB, XS;
defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw,
- SchedWriteShuffle, NoVLX_Or_NoBWI>, XD;
+ SchedWriteShuffle, NoVLX_Or_NoBWI>, TB, XD;
//===---------------------------------------------------------------------===//
// Packed Integer Pack Instructions (SSE & AVX)
@@ -4004,7 +4004,7 @@ def VPEXTRWrr : Ii8<0xC5, MRMSrcReg,
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
timm:$src2))]>,
- PD, VEX, WIG, Sched<[WriteVecExtract]>;
+ TB, PD, VEX, WIG, Sched<[WriteVecExtract]>;
def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
(outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -4014,10 +4014,10 @@ def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
// Insert
let Predicates = [HasAVX, NoBWI] in
-defm VPINSRW : sse2_pinsrw<0>, PD, VEX, VVVV, WIG;
+defm VPINSRW : sse2_pinsrw<0>, TB, PD, VEX, VVVV, WIG;
let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
-defm PINSRW : sse2_pinsrw, PD;
+defm PINSRW : sse2_pinsrw, TB, PD;
} // ExeDomain = SSEPackedInt
@@ -4306,13 +4306,13 @@ let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLoad] in {
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, TB, XS,
VEX, Requires<[UseAVX]>, WIG;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
- XS, Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
+ TB, XS, Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
} // ExeDomain, SchedRW
//===---------------------------------------------------------------------===//
@@ -4369,11 +4369,11 @@ let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecLogic.XMM] in {
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
- XS, VEX, Requires<[UseAVX]>, WIG;
+ TB, XS, VEX, Requires<[UseAVX]>, WIG;
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
- XS, Requires<[UseSSE2]>;
+ TB, XS, Requires<[UseSSE2]>;
} // ExeDomain, SchedRW
let Predicates = [UseAVX] in {
@@ -4563,27 +4563,27 @@ let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VADDSUBPS : sse3_addsub<"vaddsubps", v4f32, VR128, f128mem,
SchedWriteFAddSizes.PS.XMM, loadv4f32, 0>,
- XD, VEX, VVVV, WIG;
+ TB, XD, VEX, VVVV, WIG;
defm VADDSUBPSY : sse3_addsub<"vaddsubps", v8f32, VR256, f256mem,
SchedWriteFAddSizes.PS.YMM, loadv8f32, 0>,
- XD, VEX, VVVV, VEX_L, WIG;
+ TB, XD, VEX, VVVV, VEX_L, WIG;
}
let ExeDomain = SSEPackedDouble in {
defm VADDSUBPD : sse3_addsub<"vaddsubpd", v2f64, VR128, f128mem,
SchedWriteFAddSizes.PD.XMM, loadv2f64, 0>,
- PD, VEX, VVVV, WIG;
+ TB, PD, VEX, VVVV, WIG;
defm VADDSUBPDY : sse3_addsub<"vaddsubpd", v4f64, VR256, f256mem,
SchedWriteFAddSizes.PD.YMM, loadv4f64, 0>,
- PD, VEX, VVVV, VEX_L, WIG;
+ TB, PD, VEX, VVVV, VEX_L, WIG;
}
}
let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
let ExeDomain = SSEPackedSingle in
defm ADDSUBPS : sse3_addsub<"addsubps", v4f32, VR128, f128mem,
- SchedWriteFAddSizes.PS.XMM, memopv4f32>, XD;
+ SchedWriteFAddSizes.PS.XMM, memopv4f32>, TB, XD;
let ExeDomain = SSEPackedDouble in
defm ADDSUBPD : sse3_addsub<"addsubpd", v2f64, VR128, f128mem,
- SchedWriteFAddSizes.PD.XMM, memopv2f64>, PD;
+ SchedWriteFAddSizes.PD.XMM, memopv2f64>, TB, PD;
}
//===---------------------------------------------------------------------===//
@@ -5760,33 +5760,33 @@ let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)]>,
- Sched<[WritePOPCNT]>, OpSize16, XS;
+ Sched<[WritePOPCNT]>, OpSize16, TB, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (ctpop (loadi16 addr:$src))),
(implicit EFLAGS)]>,
- Sched<[WritePOPCNT.Folded]>, OpSize16, XS;
+ Sched<[WritePOPCNT.Folded]>, OpSize16, TB, XS;
def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)]>,
- Sched<[WritePOPCNT]>, OpSize32, XS;
+ Sched<[WritePOPCNT]>, OpSize32, TB, XS;
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (ctpop (loadi32 addr:$src))),
(implicit EFLAGS)]>,
- Sched<[WritePOPCNT.Folded]>, OpSize32, XS;
+ Sched<[WritePOPCNT.Folded]>, OpSize32, TB, XS;
def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)]>,
- Sched<[WritePOPCNT]>, XS;
+ Sched<[WritePOPCNT]>, TB, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (ctpop (loadi64 addr:$src))),
(implicit EFLAGS)]>,
- Sched<[WritePOPCNT.Folded]>, XS;
+ Sched<[WritePOPCNT.Folded]>, TB, XS;
}
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
@@ -6290,7 +6290,7 @@ multiclass SS41I_quaternary_avx<bits<8> opc, string OpcodeStr, RegisterClass RC,
!strconcat(OpcodeStr,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst, (VT (OpNode RC:$src3, RC:$src2, RC:$src1)))],
- SSEPackedInt>, TAPD, VEX, VVVV,
+ SSEPackedInt>, TA, PD, VEX, VVVV,
Sched<[sched]>;
def rm : Ii8Reg<opc, MRMSrcMem, (outs RC:$dst),
@@ -6299,7 +6299,7 @@ multiclass SS41I_quaternary_avx<bits<8> opc, string OpcodeStr, RegisterClass RC,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set RC:$dst,
(OpNode RC:$src3, (mem_frag addr:$src2),
- RC:$src1))], SSEPackedInt>, TAPD, VEX, VVVV,
+ RC:$src1))], SSEPackedInt>, TA, PD, VEX, VVVV,
Sched<[sched.Folded, sched.ReadAfterFold,
// x86memop:$src2
ReadDefault, ReadDefault, ReadDefault, ReadDefault,
@@ -6715,7 +6715,7 @@ multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
[!if(UsesXMM0,
(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
(set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>,
- T8PS, Sched<[sched]>;
+ T8, PS, Sched<[sched]>;
def rm#Suffix : I<Opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
@@ -6726,7 +6726,7 @@ multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
(set VR128:$dst, (IntId VR128:$src1,
(memop addr:$src2), XMM0)),
(set VR128:$dst, (IntId VR128:$src1,
- (memop addr:$src2))))]>, T8PS,
+ (memop addr:$src2))))]>, T8, PS,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
@@ -6736,7 +6736,7 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, NoEGPR] in {
"sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
- (i8 timm:$src3)))]>, TAPS,
+ (i8 timm:$src3)))]>, TA, PS,
Sched<[SchedWriteVecIMul.XMM]>;
def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, u8imm:$src3),
@@ -6744,7 +6744,7 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, NoEGPR] in {
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1,
(memop addr:$src2),
- (i8 timm:$src3)))]>, TAPS,
+ (i8 timm:$src3)))]>, TA, PS,
Sched<[SchedWriteVecIMul.XMM.Folded,
SchedWriteVecIMul.XMM.ReadAfterFold]>;
@@ -6772,7 +6772,7 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, HasEGPR, In64BitMode] in
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
(i8 timm:$src3)))]>,
- EVEX, NoCD8, T_MAP4PS, Sched<[SchedWriteVecIMul.XMM]>;
+ EVEX, NoCD8, T_MAP4, PS, Sched<[SchedWriteVecIMul.XMM]>;
def SHA1RNDS4rmi_EVEX: Ii8<0xD4, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, u8imm:$src3),
"sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
@@ -6780,31 +6780,31 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, HasEGPR, In64BitMode] in
(int_x86_sha1rnds4 VR128:$src1,
(memop addr:$src2),
(i8 timm:$src3)))]>,
- EVEX, NoCD8, T_MAP4PS,
+ EVEX, NoCD8, T_MAP4, PS,
Sched<[SchedWriteVecIMul.XMM.Folded,
SchedWriteVecIMul.XMM.ReadAfterFold]>;
defm SHA1NEXTE : SHAI_binop<0xD8, "sha1nexte", int_x86_sha1nexte,
SchedWriteVecIMul.XMM, "_EVEX">,
- EVEX, NoCD8, T_MAP4PS;
+ EVEX, NoCD8, T_MAP4;
defm SHA1MSG1 : SHAI_binop<0xD9, "sha1msg1", int_x86_sha1msg1,
SchedWriteVecIMul.XMM, "_EVEX">,
- EVEX, NoCD8, T_MAP4PS;
+ EVEX, NoCD8, T_MAP4;
defm SHA1MSG2 : SHAI_binop<0xDA, "sha1msg2", int_x86_sha1msg2,
SchedWriteVecIMul.XMM, "_EVEX">,
- EVEX, NoCD8, T_MAP4PS;
+ EVEX, NoCD8, T_MAP4;
let Uses=[XMM0] in
defm SHA256RNDS2 : SHAI_binop<0xDB, "sha256rnds2", int_x86_sha256rnds2,
SchedWriteVecIMul.XMM, "_EVEX", 1>,
- EVEX, NoCD8, T_MAP4PS;
+ EVEX, NoCD8, T_MAP4;
defm SHA256MSG1 : SHAI_binop<0xDC, "sha256msg1", int_x86_sha256msg1,
SchedWriteVecIMul.XMM, "_EVEX">,
- EVEX, NoCD8, T_MAP4PS;
+ EVEX, NoCD8, T_MAP4;
defm SHA256MSG2 : SHAI_binop<0xDD, "sha256msg2", int_x86_sha256msg2,
SchedWriteVecIMul.XMM, "_EVEX">,
- EVEX, NoCD8, T_MAP4PS;
+ EVEX, NoCD8, T_MAP4;
}
//===----------------------------------------------------------------------===//
@@ -7035,26 +7035,26 @@ def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
"extrq\t{$idx, $len, $src|$src, $len, $idx}",
[(set VR128:$dst, (X86extrqi VR128:$src, timm:$len,
timm:$idx))]>,
- PD, Sched<[SchedWriteVecALU.XMM]>;
+ TB, PD, Sched<[SchedWriteVecALU.XMM]>;
def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$mask),
"extrq\t{$mask, $src|$src, $mask}",
[(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
VR128:$mask))]>,
- PD, Sched<[SchedWriteVecALU.XMM]>;
+ TB, PD, Sched<[SchedWriteVecALU.XMM]>;
def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
"insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
[(set VR128:$dst, (X86insertqi VR128:$src, VR128:$src2,
timm:$len, timm:$idx))]>,
- XD, Sched<[SchedWriteVecALU.XMM]>;
+ TB, XD, Sched<[SchedWriteVecALU.XMM]>;
def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$mask),
"insertq\t{$mask, $src|$src, $mask}",
[(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
VR128:$mask))]>,
- XD, Sched<[SchedWriteVecALU.XMM]>;
+ TB, XD, Sched<[SchedWriteVecALU.XMM]>;
}
} // ExeDomain = SSEPackedInt
@@ -7062,10 +7062,10 @@ def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
let AddedComplexity = 400 in { // Prefer non-temporal versions
let hasSideEffects = 0, mayStore = 1, SchedRW = [SchedWriteFMoveLSNT.Scl.MR] in {
def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
- "movntss\t{$src, $dst|$dst, $src}", []>, XS;
+ "movntss\t{$src, $dst|$dst, $src}", []>, TB, XS;
def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movntsd\t{$src, $dst|$dst, $src}", []>, XD;
+ "movntsd\t{$src, $dst|$dst, $src}", []>, TB, XD;
} // SchedRW
def : Pat<(nontemporalstore FR32:$src, addr:$dst),
@@ -7474,12 +7474,12 @@ let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
- [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L,
+ [(int_x86_avx_vzeroall)]>, TB, PS, VEX, VEX_L,
Requires<[HasAVX]>, WIG;
// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
- [(int_x86_avx_vzeroupper)]>, PS, VEX,
+ [(int_x86_avx_vzeroupper)]>, TB, PS, VEX,
Requires<[HasAVX]>, WIG;
} // Defs
} // SchedRW
@@ -7493,11 +7493,11 @@ multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop,
def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}",
[(set RC:$dst, (X86any_cvtph2ps VR128:$src))]>,
- T8PD, VEX, Sched<[sched]>;
+ T8, PD, VEX, Sched<[sched]>;
let hasSideEffects = 0, mayLoad = 1 in
def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
"vcvtph2ps\t{$src, $dst|$dst, $src}",
- []>, T8PD, VEX, Sched<[sched.Folded]>;
+ []>, T8, PD, VEX, Sched<[sched.Folded]>;
}
multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop,
@@ -7506,12 +7506,12 @@ multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop,
(ins RC:$src1, i32u8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (X86any_cvtps2ph RC:$src1, timm:$src2))]>,
- TAPD, VEX, Sched<[RR]>;
+ TA, PD, VEX, Sched<[RR]>;
let hasSideEffects = 0, mayStore = 1 in
def mr : Ii8<0x1D, MRMDestMem, (outs),
(ins x86memop:$dst, RC:$src1, i32u8imm:$src2),
"vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- TAPD, VEX, Sched<[MR]>;
+ TA, PD, VEX, Sched<[MR]>;
}
let Predicates = [HasF16C, NoVLX] in {
@@ -8109,12 +8109,12 @@ multiclass GF2P8MULB_rm<string OpcodeStr, ValueType OpVT,
let isCommutable = 1 in
def rr : PDI<0xCF, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), "",
[(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1, RC:$src2)))]>,
- Sched<[sched]>, T8PD;
+ Sched<[sched]>, T8;
def rm : PDI<0xCF, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, X86MemOp:$src2), "",
[(set RC:$dst, (OpVT (X86GF2P8mulb RC:$src1,
(MemOpFrag addr:$src2))))]>,
- Sched<[sched.Folded, sched.ReadAfterFold]>, T8PD;
+ Sched<[sched.Folded, sched.ReadAfterFold]>, T8;
}
}
@@ -8167,9 +8167,9 @@ let Predicates = [HasGFNI, HasAVX, NoVLX] in {
// GF2P8AFFINEINVQB, GF2P8AFFINEQB
let isCommutable = 0 in {
defm GF2P8AFFINEINVQB : GF2P8AFFINE_common<0xCF, "gf2p8affineinvqb",
- X86GF2P8affineinvqb>, TAPD;
+ X86GF2P8affineinvqb>, TA, PD;
defm GF2P8AFFINEQB : GF2P8AFFINE_common<0xCE, "gf2p8affineqb",
- X86GF2P8affineqb>, TAPD;
+ X86GF2P8affineqb>, TA, PD;
}
// AVX-IFMA
@@ -8234,40 +8234,40 @@ multiclass avx_dotprod_rm<bits<8> Opc, string OpcodeStr, ValueType OpVT,
let Predicates = [HasAVXVNNIINT8] in {
defm VPDPBSSD : avx_dotprod_rm<0x50,"vpdpbssd", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbssd, SchedWriteVecIMul.XMM,
- 1>, T8XD;
+ 1>, T8, XD;
defm VPDPBSSDY : avx_dotprod_rm<0x50,"vpdpbssd", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbssd, SchedWriteVecIMul.YMM,
- 1>, VEX_L, T8XD;
+ 1>, VEX_L, T8, XD;
defm VPDPBUUD : avx_dotprod_rm<0x50,"vpdpbuud", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbuud, SchedWriteVecIMul.XMM,
- 1>, T8PS;
+ 1>, T8, PS;
defm VPDPBUUDY : avx_dotprod_rm<0x50,"vpdpbuud", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbuud, SchedWriteVecIMul.YMM,
- 1>, VEX_L, T8PS;
+ 1>, VEX_L, T8, PS;
defm VPDPBSSDS : avx_dotprod_rm<0x51,"vpdpbssds", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbssds, SchedWriteVecIMul.XMM,
- 1>, T8XD;
+ 1>, T8, XD;
defm VPDPBSSDSY : avx_dotprod_rm<0x51,"vpdpbssds", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbssds, SchedWriteVecIMul.YMM,
- 1>, VEX_L, T8XD;
+ 1>, VEX_L, T8, XD;
defm VPDPBUUDS : avx_dotprod_rm<0x51,"vpdpbuuds", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbuuds, SchedWriteVecIMul.XMM,
- 1>, T8PS;
+ 1>, T8, PS;
defm VPDPBUUDSY : avx_dotprod_rm<0x51,"vpdpbuuds", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbuuds, SchedWriteVecIMul.YMM,
- 1>, VEX_L, T8PS;
+ 1>, VEX_L, T8, PS;
defm VPDPBSUD : avx_dotprod_rm<0x50,"vpdpbsud", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbsud, SchedWriteVecIMul.XMM,
- 0>, T8XS;
+ 0>, T8, XS;
defm VPDPBSUDY : avx_dotprod_rm<0x50,"vpdpbsud", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbsud, SchedWriteVecIMul.YMM,
- 0>, VEX_L, T8XS;
+ 0>, VEX_L, T8, XS;
defm VPDPBSUDS : avx_dotprod_rm<0x51,"vpdpbsuds", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbsuds, SchedWriteVecIMul.XMM,
- 0>, T8XS;
+ 0>, T8, XS;
defm VPDPBSUDSY : avx_dotprod_rm<0x51,"vpdpbsuds", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbsuds, SchedWriteVecIMul.YMM,
- 0>, VEX_L, T8XS;
+ 0>, VEX_L, T8, XS;
}
// AVX-NE-CONVERT
@@ -8306,18 +8306,18 @@ multiclass VCVTNEPS2BF16_BASE {
let Predicates = [HasAVXNECONVERT] in {
defm VBCSTNEBF162PS : AVX_NE_CONVERT_BASE<0xb1, "vbcstnebf162ps", f16mem,
- f16mem>, T8XS;
+ f16mem>, T8, XS;
defm VBCSTNESH2PS : AVX_NE_CONVERT_BASE<0xb1, "vbcstnesh2ps", f16mem, f16mem>,
- T8PD;
+ T8, PD;
defm VCVTNEEBF162PS : AVX_NE_CONVERT_BASE<0xb0, "vcvtneebf162ps", f128mem,
- f256mem>, T8XS;
+ f256mem>, T8, XS;
defm VCVTNEEPH2PS : AVX_NE_CONVERT_BASE<0xb0, "vcvtneeph2ps", f128mem,
- f256mem>, T8PD;
+ f256mem>, T8, PD;
defm VCVTNEOBF162PS : AVX_NE_CONVERT_BASE<0xb0, "vcvtneobf162ps", f128mem,
- f256mem>, T8XD;
+ f256mem>, T8, XD;
defm VCVTNEOPH2PS : AVX_NE_CONVERT_BASE<0xb0, "vcvtneoph2ps", f128mem,
- f256mem>, T8PS;
- defm VCVTNEPS2BF16 : VCVTNEPS2BF16_BASE, VEX, T8XS, ExplicitVEXPrefix;
+ f256mem>, T8, PS;
+ defm VCVTNEPS2BF16 : VCVTNEPS2BF16_BASE, VEX, T8, XS, ExplicitVEXPrefix;
def : Pat<(v8bf16 (X86vfpround (v8f32 VR256:$src))),
(VCVTNEPS2BF16Yrr VR256:$src)>;
@@ -8337,19 +8337,19 @@ def VSHA512MSG1rr : I<0xcc, MRMSrcReg, (outs VR256:$dst),
"vsha512msg1\t{$src2, $dst|$dst, $src2}",
[(set VR256:$dst,
(int_x86_vsha512msg1 VR256:$src1, VR128:$src2))]>, VEX_L,
- VEX, T8XD, Sched<[WriteVecIMul]>;
+ VEX, T8, XD, Sched<[WriteVecIMul]>;
def VSHA512MSG2rr : I<0xcd, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2),
"vsha512msg2\t{$src2, $dst|$dst, $src2}",
[(set VR256:$dst,
(int_x86_vsha512msg2 VR256:$src1, VR256:$src2))]>, VEX_L,
- VEX, T8XD, Sched<[WriteVecIMul]>;
+ VEX, T8, XD, Sched<[WriteVecIMul]>;
def VSHA512RNDS2rr : I<0xcb, MRMSrcReg, (outs VR256:$dst),
(ins VR256:$src1, VR256:$src2, VR128:$src3),
"vsha512rnds2\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR256:$dst,
(int_x86_vsha512rnds2 VR256:$src1, VR256:$src2, VR128:$src3))]>,
- VEX_L, VEX, VVVV, T8XD, Sched<[WriteVecIMul]>;
+ VEX_L, VEX, VVVV, T8, XD, Sched<[WriteVecIMul]>;
}
// FIXME: Is there a better scheduler class for SM3 than WriteVecIMul?
@@ -8389,9 +8389,9 @@ let Predicates = [HasSM3], Constraints = "$src1 = $dst" in {
}
}
-defm VSM3MSG1 : SM3_Base<"vsm3msg1">, T8PS;
-defm VSM3MSG2 : SM3_Base<"vsm3msg2">, T8PD;
-defm VSM3RNDS2 : VSM3RNDS2_Base, VEX, VVVV, TAPD;
+defm VSM3MSG1 : SM3_Base<"vsm3msg1">, T8, PS;
+defm VSM3MSG2 : SM3_Base<"vsm3msg2">, T8, PD;
+defm VSM3RNDS2 : VSM3RNDS2_Base, VEX, VVVV, TA, PD;
// FIXME: Is there a better scheduler class for SM4 than WriteVecIMul?
let Predicates = [HasSM4] in {
@@ -8412,10 +8412,10 @@ let Predicates = [HasSM4] in {
}
}
-defm VSM4KEY4 : SM4_Base<"vsm4key4", VR128, "128", loadv4i32, i128mem>, T8XS, VEX, VVVV;
-defm VSM4KEY4Y : SM4_Base<"vsm4key4", VR256, "256", loadv8i32, i256mem>, T8XS, VEX_L, VEX, VVVV;
-defm VSM4RNDS4 : SM4_Base<"vsm4rnds4", VR128, "128", loadv4i32, i128mem>, T8XD, VEX, VVVV;
-defm VSM4RNDS4Y : SM4_Base<"vsm4rnds4", VR256, "256", loadv8i32, i256mem>, T8XD, VEX_L, VEX, VVVV;
+defm VSM4KEY4 : SM4_Base<"vsm4key4", VR128, "128", loadv4i32, i128mem>, T8, XS, VEX, VVVV;
+defm VSM4KEY4Y : SM4_Base<"vsm4key4", VR256, "256", loadv8i32, i256mem>, T8, XS, VEX_L, VEX, VVVV;
+defm VSM4RNDS4 : SM4_Base<"vsm4rnds4", VR128, "128", loadv4i32, i128mem>, T8, XD, VEX, VVVV;
+defm VSM4RNDS4Y : SM4_Base<"vsm4rnds4", VR256, "256", loadv8i32, i256mem>, T8, XD, VEX_L, VEX, VVVV;
let Predicates = [HasAVXVNNIINT16], Constraints = "$src1 = $dst" in
multiclass avx_vnni_int16<bits<8> opc, string OpcodeStr, bit IsCommutable> {
@@ -8454,9 +8454,9 @@ multiclass avx_vnni_int16<bits<8> opc, string OpcodeStr, bit IsCommutable> {
VEX, VVVV, VEX_L, Sched<[SchedWriteVecIMul.YMM]>;
}
-defm VPDPWSUD : avx_vnni_int16<0xd2, "vpdpwsud", 0>, T8XS;
-defm VPDPWSUDS : avx_vnni_int16<0xd3, "vpdpwsuds", 0>, T8XS;
-defm VPDPWUSD : avx_vnni_int16<0xd2, "vpdpwusd", 0>, T8PD;
-defm VPDPWUSDS : avx_vnni_int16<0xd3, "vpdpwusds", 0>, T8PD;
-defm VPDPWUUD : avx_vnni_int16<0xd2, "vpdpwuud", 1>, T8PS;
-defm VPDPWUUDS : avx_vnni_int16<0xd3, "vpdpwuuds", 1>, T8PS;
+defm VPDPWSUD : avx_vnni_int16<0xd2, "vpdpwsud", 0>, T8, XS;
+defm VPDPWSUDS : avx_vnni_int16<0xd3, "vpdpwsuds", 0>, T8, XS;
+defm VPDPWUSD : avx_vnni_int16<0xd2, "vpdpwusd", 0>, T8, PD;
+defm VPDPWUSDS : avx_vnni_int16<0xd3, "vpdpwusds", 0>, T8, PD;
+defm VPDPWUUD : avx_vnni_int16<0xd2, "vpdpwuud", 1>, T8, PS;
+defm VPDPWUUDS : avx_vnni_int16<0xd3, "vpdpwuuds", 1>, T8, PS;
diff --git a/llvm/lib/Target/X86/X86InstrShiftRotate.td b/llvm/lib/Target/X86/X86InstrShiftRotate.td
index 48bf23f8cbf7b2..d13e3b7af69a95 100644
--- a/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -829,12 +829,12 @@ multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop,
let hasSideEffects = 0 in {
def ri#Suffix : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
- TAXD, VEX, Sched<[WriteShift]>;
+ TA, XD, VEX, Sched<[WriteShift]>;
let mayLoad = 1 in
def mi#Suffix : Ii8<0xF0, MRMSrcMem, (outs RC:$dst),
(ins x86memop:$src1, u8imm:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
- TAXD, VEX, Sched<[WriteShiftLd]>;
+ TA, XD, VEX, Sched<[WriteShiftLd]>;
}
}
@@ -860,23 +860,23 @@ let hasSideEffects = 0 in {
let Predicates = [HasBMI2, NoEGPR] in {
defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem>;
defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem>, REX_W;
- defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8XS;
- defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8XS, REX_W;
- defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8XD;
- defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, REX_W;
- defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8PD;
- defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8PD, REX_W;
+ defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8, XS;
+ defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8, XS, REX_W;
+ defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8, XD;
+ defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8, XD, REX_W;
+ defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8, PD;
+ defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8, PD, REX_W;
}
let Predicates = [HasBMI2, HasEGPR] in {
defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem, "_EVEX">, EVEX;
defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem, "_EVEX">, REX_W, EVEX;
- defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem, "_EVEX">, T8XS, EVEX;
- defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem, "_EVEX">, T8XS, REX_W, EVEX;
- defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem, "_EVEX">, T8XD, EVEX;
- defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem, "_EVEX">, T8XD, REX_W, EVEX;
- defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem, "_EVEX">, T8PD, EVEX;
- defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem, "_EVEX">, T8PD, REX_W, EVEX;
+ defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem, "_EVEX">, T8, XS, EVEX;
+ defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem, "_EVEX">, T8, XS, REX_W, EVEX;
+ defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem, "_EVEX">, T8, XD, EVEX;
+ defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem, "_EVEX">, T8, XD, REX_W, EVEX;
+ defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem, "_EVEX">, T8, PD, EVEX;
+ defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem, "_EVEX">, T8, PD, REX_W, EVEX;
}
let Predicates = [HasBMI2] in {
diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index 25db96b31be7a8..4471071e8f9a91 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -426,31 +426,31 @@ let SchedRW = [WriteSystem] in {
let Uses = [EAX, ECX, EDX] in
def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", []>, TB;
let Uses = [EAX, ECX, EDX] in
-def WRMSRNS : I<0x01, MRM_C6, (outs), (ins), "wrmsrns", []>, PS;
+def WRMSRNS : I<0x01, MRM_C6, (outs), (ins), "wrmsrns", []>, TB, PS;
let Defs = [EAX, EDX], Uses = [ECX] in
def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", []>, TB;
let Defs = [RAX, EFLAGS], Uses = [RBX, RCX], Predicates = [In64BitMode] in
-def PBNDKB : I<0x01, MRM_C7, (outs), (ins), "pbndkb", []>, PS;
+def PBNDKB : I<0x01, MRM_C7, (outs), (ins), "pbndkb", []>, TB, PS;
let Uses = [RSI, RDI, RCX], Predicates = [In64BitMode] in {
-def WRMSRLIST : I<0x01, MRM_C6, (outs), (ins), "wrmsrlist", []>, XS;
-def RDMSRLIST : I<0x01, MRM_C6, (outs), (ins), "rdmsrlist", []>, XD;
+def WRMSRLIST : I<0x01, MRM_C6, (outs), (ins), "wrmsrlist", []>, TB, XS;
+def RDMSRLIST : I<0x01, MRM_C6, (outs), (ins), "rdmsrlist", []>, TB, XD;
}
let Predicates = [HasUSERMSR], mayLoad = 1 in {
def URDMSRrr : I<0xf8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"urdmsr\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (int_x86_urdmsr GR64:$src))]>, T8XD;
+ [(set GR64:$dst, (int_x86_urdmsr GR64:$src))]>, T8, XD;
def URDMSRri : Ii32<0xf8, MRM0r, (outs GR64:$dst), (ins i64i32imm:$imm),
"urdmsr\t{$imm, $dst|$dst, $imm}",
- [(set GR64:$dst, (int_x86_urdmsr i64immSExt32_su:$imm))]>, T_MAP7XD, VEX;
+ [(set GR64:$dst, (int_x86_urdmsr i64immSExt32_su:$imm))]>, T_MAP7, XD, VEX;
}
let Predicates = [HasUSERMSR], mayStore = 1 in {
def UWRMSRrr : I<0xf8, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
"uwrmsr\t{$src1, $src2|$src2, $src1}",
- [(int_x86_uwrmsr GR64:$src1, GR64:$src2)]>, T8XS;
+ [(int_x86_uwrmsr GR64:$src1, GR64:$src2)]>, T8, XS;
def UWRMSRir : Ii32<0xf8, MRM0r, (outs), (ins GR64:$src, i64i32imm:$imm),
"uwrmsr\t{$src, $imm|$imm, $src}",
- [(int_x86_uwrmsr GR64:$src, i64immSExt32_su:$imm)]>, T_MAP7XS, VEX;
+ [(int_x86_uwrmsr GR64:$src, i64immSExt32_su:$imm)]>, T_MAP7, XS, VEX;
}
let Defs = [RAX, RDX], Uses = [ECX] in
def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", []>, TB;
@@ -481,12 +481,12 @@ let Defs = [EAX, EBX, ECX, EDX], Uses = [EAX, ECX] in
// Cache instructions
let SchedRW = [WriteSystem] in {
def INVD : I<0x08, RawFrm, (outs), (ins), "invd", []>, TB;
-def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", [(int_x86_wbinvd)]>, PS;
+def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", [(int_x86_wbinvd)]>, TB, PS;
// wbnoinvd is like wbinvd, except without invalidation
// encoding: like wbinvd + an 0xF3 prefix
def WBNOINVD : I<0x09, RawFrm, (outs), (ins), "wbnoinvd",
- [(int_x86_wbnoinvd)]>, XS,
+ [(int_x86_wbnoinvd)]>, TB, XS,
Requires<[HasWBNOINVD]>;
} // SchedRW
@@ -497,74 +497,74 @@ let SchedRW = [WriteSystem] in {
let Uses = [SSP] in {
let Defs = [SSP] in {
def INCSSPD : I<0xAE, MRM5r, (outs), (ins GR32:$src), "incsspd\t$src",
- [(int_x86_incsspd GR32:$src)]>, XS;
+ [(int_x86_incsspd GR32:$src)]>, TB, XS;
def INCSSPQ : RI<0xAE, MRM5r, (outs), (ins GR64:$src), "incsspq\t$src",
- [(int_x86_incsspq GR64:$src)]>, XS;
+ [(int_x86_incsspq GR64:$src)]>, TB, XS;
} // Defs SSP
let Constraints = "$src = $dst" in {
def RDSSPD : I<0x1E, MRM1r, (outs GR32:$dst), (ins GR32:$src),
"rdsspd\t$dst",
- [(set GR32:$dst, (int_x86_rdsspd GR32:$src))]>, XS;
+ [(set GR32:$dst, (int_x86_rdsspd GR32:$src))]>, TB, XS;
def RDSSPQ : RI<0x1E, MRM1r, (outs GR64:$dst), (ins GR64:$src),
"rdsspq\t$dst",
- [(set GR64:$dst, (int_x86_rdsspq GR64:$src))]>, XS;
+ [(set GR64:$dst, (int_x86_rdsspq GR64:$src))]>, TB, XS;
}
let Defs = [SSP] in {
def SAVEPREVSSP : I<0x01, MRM_EA, (outs), (ins), "saveprevssp",
- [(int_x86_saveprevssp)]>, XS;
+ [(int_x86_saveprevssp)]>, TB, XS;
def RSTORSSP : I<0x01, MRM5m, (outs), (ins i32mem:$src),
"rstorssp\t$src",
- [(int_x86_rstorssp addr:$src)]>, XS;
+ [(int_x86_rstorssp addr:$src)]>, TB, XS;
} // Defs SSP
} // Uses SSP
let Predicates = [NoEGPR] in {
def WRSSD : I<0xF6, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrssd\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssd GR32:$src, addr:$dst)]>, T8PS;
+ [(int_x86_wrssd GR32:$src, addr:$dst)]>, T8, PS;
def WRSSQ : RI<0xF6, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrssq\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssq GR64:$src, addr:$dst)]>, T8PS;
+ [(int_x86_wrssq GR64:$src, addr:$dst)]>, T8, PS;
def WRUSSD : I<0xF5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrussd\t{$src, $dst|$dst, $src}",
- [(int_x86_wrussd GR32:$src, addr:$dst)]>, T8PD;
+ [(int_x86_wrussd GR32:$src, addr:$dst)]>, T8, PD;
def WRUSSQ : RI<0xF5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrussq\t{$src, $dst|$dst, $src}",
- [(int_x86_wrussq GR64:$src, addr:$dst)]>, T8PD;
+ [(int_x86_wrussq GR64:$src, addr:$dst)]>, T8, PD;
}
let Predicates = [HasEGPR, In64BitMode] in {
def WRSSD_EVEX : I<0x66, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrssd\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssd GR32:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4PS;
+ [(int_x86_wrssd GR32:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4, PS;
def WRSSQ_EVEX : RI<0x66, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrssq\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssq GR64:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4PS;
+ [(int_x86_wrssq GR64:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4, PS;
def WRUSSD_EVEX : I<0x65, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrussd\t{$src, $dst|$dst, $src}",
- [(int_x86_wrussd GR32:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4PD;
+ [(int_x86_wrussd GR32:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4, PD;
def WRUSSQ_EVEX : RI<0x65, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrussq\t{$src, $dst|$dst, $src}",
- [(int_x86_wrussq GR64:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4PD;
+ [(int_x86_wrussq GR64:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4, PD;
}
let Defs = [SSP] in {
let Uses = [SSP] in {
def SETSSBSY : I<0x01, MRM_E8, (outs), (ins), "setssbsy",
- [(int_x86_setssbsy)]>, XS;
+ [(int_x86_setssbsy)]>, TB, XS;
} // Uses SSP
def CLRSSBSY : I<0xAE, MRM6m, (outs), (ins i32mem:$src),
"clrssbsy\t$src",
- [(int_x86_clrssbsy addr:$src)]>, XS;
+ [(int_x86_clrssbsy addr:$src)]>, TB, XS;
} // Defs SSP
} // SchedRW
let SchedRW = [WriteSystem] in {
- def ENDBR64 : I<0x1E, MRM_FA, (outs), (ins), "endbr64", []>, XS;
- def ENDBR32 : I<0x1E, MRM_FB, (outs), (ins), "endbr32", []>, XS;
+ def ENDBR64 : I<0x1E, MRM_FA, (outs), (ins), "endbr64", []>, TB, XS;
+ def ENDBR32 : I<0x1E, MRM_FB, (outs), (ins), "endbr32", []>, TB, XS;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -574,51 +574,51 @@ let SchedRW = [WriteSystem] in {
// on Windows without needing to enable the xsave feature to be compatible with
// MSVC.
let Defs = [EDX, EAX], Uses = [ECX] in
-def XGETBV : I<0x01, MRM_D0, (outs), (ins), "xgetbv", []>, PS;
+def XGETBV : I<0x01, MRM_D0, (outs), (ins), "xgetbv", []>, TB, PS;
let Uses = [EDX, EAX, ECX] in
def XSETBV : I<0x01, MRM_D1, (outs), (ins),
"xsetbv",
- [(int_x86_xsetbv ECX, EDX, EAX)]>, PS;
+ [(int_x86_xsetbv ECX, EDX, EAX)]>, TB, PS;
let Uses = [EDX, EAX] in {
def XSAVE : I<0xAE, MRM4m, (outs), (ins opaquemem:$dst),
"xsave\t$dst",
- [(int_x86_xsave addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVE]>;
+ [(int_x86_xsave addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE]>;
def XSAVE64 : RI<0xAE, MRM4m, (outs), (ins opaquemem:$dst),
"xsave64\t$dst",
- [(int_x86_xsave64 addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVE, In64BitMode]>;
+ [(int_x86_xsave64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE, In64BitMode]>;
def XRSTOR : I<0xAE, MRM5m, (outs), (ins opaquemem:$dst),
"xrstor\t$dst",
- [(int_x86_xrstor addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVE]>;
+ [(int_x86_xrstor addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE]>;
def XRSTOR64 : RI<0xAE, MRM5m, (outs), (ins opaquemem:$dst),
"xrstor64\t$dst",
- [(int_x86_xrstor64 addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVE, In64BitMode]>;
+ [(int_x86_xrstor64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE, In64BitMode]>;
def XSAVEOPT : I<0xAE, MRM6m, (outs), (ins opaquemem:$dst),
"xsaveopt\t$dst",
- [(int_x86_xsaveopt addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVEOPT]>;
+ [(int_x86_xsaveopt addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEOPT]>;
def XSAVEOPT64 : RI<0xAE, MRM6m, (outs), (ins opaquemem:$dst),
"xsaveopt64\t$dst",
- [(int_x86_xsaveopt64 addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVEOPT, In64BitMode]>;
+ [(int_x86_xsaveopt64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEOPT, In64BitMode]>;
def XSAVEC : I<0xC7, MRM4m, (outs), (ins opaquemem:$dst),
"xsavec\t$dst",
- [(int_x86_xsavec addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVEC]>;
+ [(int_x86_xsavec addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEC]>;
def XSAVEC64 : RI<0xC7, MRM4m, (outs), (ins opaquemem:$dst),
"xsavec64\t$dst",
- [(int_x86_xsavec64 addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVEC, In64BitMode]>;
+ [(int_x86_xsavec64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEC, In64BitMode]>;
def XSAVES : I<0xC7, MRM5m, (outs), (ins opaquemem:$dst),
"xsaves\t$dst",
- [(int_x86_xsaves addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVES]>;
+ [(int_x86_xsaves addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVES]>;
def XSAVES64 : RI<0xC7, MRM5m, (outs), (ins opaquemem:$dst),
"xsaves64\t$dst",
- [(int_x86_xsaves64 addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVE, In64BitMode]>;
+ [(int_x86_xsaves64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE, In64BitMode]>;
def XRSTORS : I<0xC7, MRM3m, (outs), (ins opaquemem:$dst),
"xrstors\t$dst",
- [(int_x86_xrstors addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVES]>;
+ [(int_x86_xrstors addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVES]>;
def XRSTORS64 : RI<0xC7, MRM3m, (outs), (ins opaquemem:$dst),
"xrstors64\t$dst",
- [(int_x86_xrstors64 addr:$dst, EDX, EAX)]>, PS, Requires<[HasXSAVES, In64BitMode]>;
+ [(int_x86_xrstors64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVES, In64BitMode]>;
} // Uses
} // SchedRW
@@ -651,10 +651,10 @@ let Defs = [RAX, RDX, RSI], Uses = [RAX, RSI] in
let SchedRW = [WriteSystem] in {
let Defs = [EAX, EDX], Uses = [ECX] in
def RDPKRUr : I<0x01, MRM_EE, (outs), (ins), "rdpkru",
- [(set EAX, (X86rdpkru ECX)), (implicit EDX)]>, PS;
+ [(set EAX, (X86rdpkru ECX)), (implicit EDX)]>, TB, PS;
let Uses = [EAX, ECX, EDX] in
def WRPKRUr : I<0x01, MRM_EF, (outs), (ins), "wrpkru",
- [(X86wrpkru EAX, EDX, ECX)]>, PS;
+ [(X86wrpkru EAX, EDX, ECX)]>, TB, PS;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -662,28 +662,28 @@ let Uses = [EAX, ECX, EDX] in
let Predicates = [HasFSGSBase, In64BitMode], SchedRW = [WriteSystem] in {
def RDFSBASE : I<0xAE, MRM0r, (outs GR32:$dst), (ins),
"rdfsbase{l}\t$dst",
- [(set GR32:$dst, (int_x86_rdfsbase_32))]>, XS;
+ [(set GR32:$dst, (int_x86_rdfsbase_32))]>, TB, XS;
def RDFSBASE64 : RI<0xAE, MRM0r, (outs GR64:$dst), (ins),
"rdfsbase{q}\t$dst",
- [(set GR64:$dst, (int_x86_rdfsbase_64))]>, XS;
+ [(set GR64:$dst, (int_x86_rdfsbase_64))]>, TB, XS;
def RDGSBASE : I<0xAE, MRM1r, (outs GR32:$dst), (ins),
"rdgsbase{l}\t$dst",
- [(set GR32:$dst, (int_x86_rdgsbase_32))]>, XS;
+ [(set GR32:$dst, (int_x86_rdgsbase_32))]>, TB, XS;
def RDGSBASE64 : RI<0xAE, MRM1r, (outs GR64:$dst), (ins),
"rdgsbase{q}\t$dst",
- [(set GR64:$dst, (int_x86_rdgsbase_64))]>, XS;
+ [(set GR64:$dst, (int_x86_rdgsbase_64))]>, TB, XS;
def WRFSBASE : I<0xAE, MRM2r, (outs), (ins GR32:$src),
"wrfsbase{l}\t$src",
- [(int_x86_wrfsbase_32 GR32:$src)]>, XS;
+ [(int_x86_wrfsbase_32 GR32:$src)]>, TB, XS;
def WRFSBASE64 : RI<0xAE, MRM2r, (outs), (ins GR64:$src),
"wrfsbase{q}\t$src",
- [(int_x86_wrfsbase_64 GR64:$src)]>, XS;
+ [(int_x86_wrfsbase_64 GR64:$src)]>, TB, XS;
def WRGSBASE : I<0xAE, MRM3r, (outs), (ins GR32:$src),
"wrgsbase{l}\t$src",
- [(int_x86_wrgsbase_32 GR32:$src)]>, XS;
+ [(int_x86_wrgsbase_32 GR32:$src)]>, TB, XS;
def WRGSBASE64 : RI<0xAE, MRM3r, (outs), (ins GR64:$src),
"wrgsbase{q}\t$src",
- [(int_x86_wrgsbase_64 GR64:$src)]>, XS;
+ [(int_x86_wrgsbase_64 GR64:$src)]>, TB, XS;
}
//===----------------------------------------------------------------------===//
@@ -691,15 +691,15 @@ let Predicates = [HasFSGSBase, In64BitMode], SchedRW = [WriteSystem] in {
let SchedRW = [WriteSystem] in {
def INVPCID32 : I<0x82, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
"invpcid\t{$src2, $src1|$src1, $src2}",
- [(int_x86_invpcid GR32:$src1, addr:$src2)]>, T8PD,
+ [(int_x86_invpcid GR32:$src1, addr:$src2)]>, T8, PD,
Requires<[Not64BitMode, HasINVPCID]>;
def INVPCID64 : I<0x82, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invpcid\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ "invpcid\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
Requires<[In64BitMode, HasINVPCID]>;
def INVPCID64_EVEX : I<0xF2, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invpcid\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4XS, Requires<[In64BitMode, HasINVPCID]>;
+ EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode, HasINVPCID]>;
} // SchedRW
let Predicates = [In64BitMode, HasINVPCID] in {
@@ -718,15 +718,15 @@ let Predicates = [In64BitMode, HasINVPCID] in {
//===----------------------------------------------------------------------===//
// SMAP Instruction
let Defs = [EFLAGS], SchedRW = [WriteSystem] in {
- def CLAC : I<0x01, MRM_CA, (outs), (ins), "clac", []>, PS;
- def STAC : I<0x01, MRM_CB, (outs), (ins), "stac", []>, PS;
+ def CLAC : I<0x01, MRM_CA, (outs), (ins), "clac", []>, TB, PS;
+ def STAC : I<0x01, MRM_CB, (outs), (ins), "stac", []>, TB, PS;
}
//===----------------------------------------------------------------------===//
// SMX Instruction
let SchedRW = [WriteSystem] in {
let Uses = [RAX, RBX, RCX, RDX], Defs = [RAX, RBX, RCX] in {
- def GETSEC : I<0x37, RawFrm, (outs), (ins), "getsec", []>, PS;
+ def GETSEC : I<0x37, RawFrm, (outs), (ins), "getsec", []>, TB, PS;
} // Uses, Defs
} // SchedRW
@@ -747,9 +747,9 @@ def STI : I<0xFB, RawFrm, (outs), (ins), "sti", []>;
// RDPID Instruction
let SchedRW = [WriteSystem] in {
def RDPID32 : I<0xC7, MRM7r, (outs GR32:$dst), (ins),
- "rdpid\t$dst", [(set GR32:$dst, (int_x86_rdpid))]>, XS,
+ "rdpid\t$dst", [(set GR32:$dst, (int_x86_rdpid))]>, TB, XS,
Requires<[Not64BitMode, HasRDPID]>;
-def RDPID64 : I<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdpid\t$dst", []>, XS,
+def RDPID64 : I<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdpid\t$dst", []>, TB, XS,
Requires<[In64BitMode, HasRDPID]>;
} // SchedRW
@@ -765,17 +765,17 @@ let Predicates = [In64BitMode, HasRDPID] in {
// PTWRITE Instruction - Write Data to a Processor Trace Packet
let SchedRW = [WriteSystem] in {
def PTWRITEm: I<0xAE, MRM4m, (outs), (ins i32mem:$dst),
- "ptwrite{l}\t$dst", [(int_x86_ptwrite32 (loadi32 addr:$dst))]>, XS,
+ "ptwrite{l}\t$dst", [(int_x86_ptwrite32 (loadi32 addr:$dst))]>, TB, XS,
Requires<[HasPTWRITE]>;
def PTWRITE64m : RI<0xAE, MRM4m, (outs), (ins i64mem:$dst),
- "ptwrite{q}\t$dst", [(int_x86_ptwrite64 (loadi64 addr:$dst))]>, XS,
+ "ptwrite{q}\t$dst", [(int_x86_ptwrite64 (loadi64 addr:$dst))]>, TB, XS,
Requires<[In64BitMode, HasPTWRITE]>;
def PTWRITEr : I<0xAE, MRM4r, (outs), (ins GR32:$dst),
- "ptwrite{l}\t$dst", [(int_x86_ptwrite32 GR32:$dst)]>, XS,
+ "ptwrite{l}\t$dst", [(int_x86_ptwrite32 GR32:$dst)]>, TB, XS,
Requires<[HasPTWRITE]>;
def PTWRITE64r : RI<0xAE, MRM4r, (outs), (ins GR64:$dst),
- "ptwrite{q}\t$dst", [(int_x86_ptwrite64 GR64:$dst)]>, XS,
+ "ptwrite{q}\t$dst", [(int_x86_ptwrite64 GR64:$dst)]>, TB, XS,
Requires<[In64BitMode, HasPTWRITE]>;
} // SchedRW
@@ -784,7 +784,7 @@ def PTWRITE64r : RI<0xAE, MRM4r, (outs), (ins GR64:$dst),
let SchedRW = [WriteSystem] in {
let Uses = [ECX], Defs = [EAX, EDX] in
- def RDPRU : I<0x01, MRM_FD, (outs), (ins), "rdpru", []>, PS,
+ def RDPRU : I<0x01, MRM_FD, (outs), (ins), "rdpru", []>, TB, PS,
Requires<[HasRDPRU]>;
}
@@ -803,6 +803,6 @@ let Uses = [ECX], Defs = [EAX, EDX] in
let SchedRW = [WriteSystem] in {
let Uses = [RAX, RBX, RCX, RDX], Defs = [RAX, RBX, RCX, RDX, EFLAGS] in
- def PCONFIG : I<0x01, MRM_C5, (outs), (ins), "pconfig", []>, PS,
+ def PCONFIG : I<0x01, MRM_C5, (outs), (ins), "pconfig", []>, TB, PS,
Requires<[HasPCONFIG]>;
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrTDX.td b/llvm/lib/Target/X86/X86InstrTDX.td
index 8d7cd60820953d..fe01677b2ea175 100644
--- a/llvm/lib/Target/X86/X86InstrTDX.td
+++ b/llvm/lib/Target/X86/X86InstrTDX.td
@@ -17,23 +17,17 @@
// 64-bit only instructions
let SchedRW = [WriteSystem], Predicates = [In64BitMode] in {
// SEAMCALL - Call to SEAM VMX-root Operation Module
-def SEAMCALL : I<0x01, MRM_CF, (outs), (ins),
- "seamcall", []>, PD;
+def SEAMCALL : I<0x01, MRM_CF, (outs), (ins), "seamcall", []>, TB, PD;
// SEAMRET - Return to Legacy VMX-root Operation
-def SEAMRET : I<0x01, MRM_CD, (outs), (ins),
- "seamret", []>, PD;
+def SEAMRET : I<0x01, MRM_CD, (outs), (ins), "seamret", []>, TB, PD;
// SEAMOPS - SEAM Operations
-def SEAMOPS : I<0x01, MRM_CE, (outs), (ins),
- "seamops", []>, PD;
-
+def SEAMOPS : I<0x01, MRM_CE, (outs), (ins), "seamops", []>, TB, PD;
} // SchedRW
// common instructions
let SchedRW = [WriteSystem] in {
// TDCALL - Call SEAM Module Functions
-def TDCALL : I<0x01, MRM_CC, (outs), (ins),
- "tdcall", []>, PD;
-
+def TDCALL : I<0x01, MRM_CC, (outs), (ins), "tdcall", []>, TB, PD;
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrTSX.td b/llvm/lib/Target/X86/X86InstrTSX.td
index 7671eb4676eede..cc9174a0c491c7 100644
--- a/llvm/lib/Target/X86/X86InstrTSX.td
+++ b/llvm/lib/Target/X86/X86InstrTSX.td
@@ -37,11 +37,11 @@ def XABORT_DEF : I<0, Pseudo, (outs), (ins), "# XABORT DEF", []>;
}
def XEND : I<0x01, MRM_D5, (outs), (ins),
- "xend", [(int_x86_xend)]>, PS, Requires<[HasRTM]>;
+ "xend", [(int_x86_xend)]>, TB, PS, Requires<[HasRTM]>;
let Defs = [EFLAGS] in
def XTEST : I<0x01, MRM_D6, (outs), (ins),
- "xtest", [(set EFLAGS, (X86xtest))]>, PS, Requires<[HasRTM]>;
+ "xtest", [(set EFLAGS, (X86xtest))]>, TB, PS, Requires<[HasRTM]>;
def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm),
"xabort\t$imm",
diff --git a/llvm/lib/Target/X86/X86InstrUtils.td b/llvm/lib/Target/X86/X86InstrUtils.td
index dd59a641dfaa2c..87eacf704de6cc 100644
--- a/llvm/lib/Target/X86/X86InstrUtils.td
+++ b/llvm/lib/Target/X86/X86InstrUtils.td
@@ -27,41 +27,18 @@ class REP { bit hasREPPrefix = 1; }
class TB { Map OpMap = TB; }
class T8 { Map OpMap = T8; }
class TA { Map OpMap = TA; }
+class T_MAP4 { Map OpMap = T_MAP4; }
+class T_MAP5 { Map OpMap = T_MAP5; }
+class T_MAP6 { Map OpMap = T_MAP6; }
+class T_MAP7 { Map OpMap = T_MAP7; }
class XOP8 { Map OpMap = XOP8; Prefix OpPrefix = PS; }
class XOP9 { Map OpMap = XOP9; Prefix OpPrefix = PS; }
class XOPA { Map OpMap = XOPA; Prefix OpPrefix = PS; }
class ThreeDNow { Map OpMap = ThreeDNow; }
-class T_MAP4 { Map OpMap = T_MAP4; }
-class T_MAP4PS : T_MAP4 { Prefix OpPrefix = PS; } // none
-class T_MAP4PD : T_MAP4 { Prefix OpPrefix = PD; } // 0x66
-class T_MAP4XS : T_MAP4 { Prefix OpPrefix = XS; } // 0xF3
-class T_MAP4XD : T_MAP4 { Prefix OpPrefix = XD; } // 0xF2
-class T_MAP5 { Map OpMap = T_MAP5; }
-class T_MAP5PS : T_MAP5 { Prefix OpPrefix = PS; } // none
-class T_MAP5PD : T_MAP5 { Prefix OpPrefix = PD; } // 0x66
-class T_MAP5XS : T_MAP5 { Prefix OpPrefix = XS; } // 0xF3
-class T_MAP5XD : T_MAP5 { Prefix OpPrefix = XD; } // 0xF2
-class T_MAP6 { Map OpMap = T_MAP6; }
-class T_MAP6PS : T_MAP6 { Prefix OpPrefix = PS; }
-class T_MAP6PD : T_MAP6 { Prefix OpPrefix = PD; }
-class T_MAP6XS : T_MAP6 { Prefix OpPrefix = XS; }
-class T_MAP6XD : T_MAP6 { Prefix OpPrefix = XD; }
-class T_MAP7 { Map OpMap = T_MAP7; }
-class T_MAP7XS : T_MAP7 { Prefix OpPrefix = XS; } // 0xF3
-class T_MAP7XD : T_MAP7 { Prefix OpPrefix = XD; } // 0xF2
-class OBXS { Prefix OpPrefix = XS; }
-class PS : TB { Prefix OpPrefix = PS; }
-class PD : TB { Prefix OpPrefix = PD; }
-class XD : TB { Prefix OpPrefix = XD; }
-class XS : TB { Prefix OpPrefix = XS; }
-class T8PS : T8 { Prefix OpPrefix = PS; }
-class T8PD : T8 { Prefix OpPrefix = PD; }
-class T8XD : T8 { Prefix OpPrefix = XD; }
-class T8XS : T8 { Prefix OpPrefix = XS; }
-class TAPS : TA { Prefix OpPrefix = PS; }
-class TAPD : TA { Prefix OpPrefix = PD; }
-class TAXD : TA { Prefix OpPrefix = XD; }
-class TAXS : TA { Prefix OpPrefix = XS; }
+class PS { Prefix OpPrefix = PS; }
+class PD { Prefix OpPrefix = PD; }
+class XD { Prefix OpPrefix = XD; }
+class XS { Prefix OpPrefix = XS; }
class VEX { Encoding OpEnc = EncVEX; }
class WIG { bit IgnoresW = 1; }
// Special version of REX_W that can be changed to VEX.W==0 for EVEX2VEX.
@@ -90,23 +67,23 @@ class XOP { Encoding OpEnc = EncXOP; }
class EVEX2VEXOverride<string VEXInstrName> {
string EVEX2VEXOverride = VEXInstrName;
}
-class AVX512BIi8Base : PD {
+class AVX512BIi8Base : TB, PD {
Domain ExeDomain = SSEPackedInt;
ImmType ImmT = Imm8;
}
-class AVX512XSIi8Base : XS {
+class AVX512XSIi8Base : TB, XS {
Domain ExeDomain = SSEPackedInt;
ImmType ImmT = Imm8;
}
-class AVX512XDIi8Base : XD {
+class AVX512XDIi8Base : TB, XD {
Domain ExeDomain = SSEPackedInt;
ImmType ImmT = Imm8;
}
-class AVX512PSIi8Base : PS {
+class AVX512PSIi8Base : TB, PS {
Domain ExeDomain = SSEPackedSingle;
ImmType ImmT = Imm8;
}
-class AVX512PDIi8Base : PD {
+class AVX512PDIi8Base : TB, PD {
Domain ExeDomain = SSEPackedDouble;
ImmType ImmT = Imm8;
}
@@ -591,26 +568,26 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE1]>;
+ : I<o, F, outs, ins, asm, pattern>, TB, XS, Requires<[UseSSE1]>;
class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE1]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, TB, XS, Requires<[UseSSE1]>;
class PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, PS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB, PS,
Requires<[UseSSE1]>;
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedSingle>, PS,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB, PS,
Requires<[UseSSE1]>;
class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XS,
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, TB, XS,
Requires<[HasAVX]>;
class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedSingle>, PS,
- Requires<[HasAVX]>;
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedSingle>,
+ TB, PS, Requires<[HasAVX]>;
// SSE2 Instruction Templates:
//
@@ -632,49 +609,49 @@ class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
class SDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XD, Requires<[UseSSE2]>;
+ : I<o, F, outs, ins, asm, pattern>, TB, XD, Requires<[UseSSE2]>;
class SDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[UseSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, TB, XD, Requires<[UseSSE2]>;
class S2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE2]>;
+ : I<o, F, outs, ins, asm, pattern>, TB, XS, Requires<[UseSSE2]>;
class S2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, TB, XS, Requires<[UseSSE2]>;
class PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, PD,
Requires<[UseSSE2]>;
class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedDouble>, PD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, PD,
Requires<[UseSSE2]>;
class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XD,
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, TB, XD,
Requires<[UseAVX]>;
class VS2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XS,
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, TB, XS,
Requires<[HasAVX]>;
class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedDouble>,
- PD, Requires<[HasAVX]>;
+ TB, PD, Requires<[HasAVX]>;
class VS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, !strconcat("v", asm), pattern>, PD,
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, TB, PD,
Requires<[UseAVX]>;
class S2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, PD, Requires<[UseSSE2]>;
+ : I<o, F, outs, ins, asm, pattern>, TB, PD, Requires<[UseSSE2]>;
class MMXSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[HasMMX, HasSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, TB, XD, Requires<[HasMMX, HasSSE2]>;
class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasMMX, HasSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, TB, XS, Requires<[HasMMX, HasSSE2]>;
// SSE3 Instruction Templates:
//
@@ -684,15 +661,15 @@ class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, XS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB, XS,
Requires<[UseSSE3]>;
class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, XD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, XD,
Requires<[UseSSE3]>;
class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, PD,
Requires<[UseSSE3]>;
@@ -709,19 +686,19 @@ class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PD,
Requires<[UseSSSE3]>;
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
Requires<[UseSSSE3]>;
class MMXSS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PS,
Requires<[HasMMX, HasSSSE3]>;
class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPS,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PS,
Requires<[HasMMX, HasSSSE3]>;
// SSE4.1 Instruction Templates:
@@ -731,11 +708,11 @@ class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
//
class SS48I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PD,
Requires<[UseSSE41]>;
class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
Requires<[UseSSE41]>;
// SSE4.2 Instruction Templates:
@@ -743,13 +720,13 @@ class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// SS428I - SSE 4.2 instructions with T8 prefix.
class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PD,
Requires<[UseSSE42]>;
// SS42AI = SSE 4.2 instructions with TA prefix
class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
Requires<[UseSSE42]>;
// CRC32I - SSE 4.2 CRC32 instructions.
@@ -757,42 +734,42 @@ class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
// controlled by the SSE42 flag.
class CRC32I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, T8XD, Requires<[HasCRC32]>;
+ : I<o, F, outs, ins, asm, pattern>, T8, XD, Requires<[HasCRC32]>;
// AVX Instruction Templates:
// Instructions introduced in AVX (no SSE equivalent forms)
//
-// AVX8I - AVX instructions with T8PD prefix.
-// AVXAIi8 - AVX instructions with TAPD prefix and ImmT = Imm8.
+// AVX8I - AVX instructions with T8, PD prefix.
+// AVXAIi8 - AVX instructions with TA, PD prefix and ImmT = Imm8.
class AVX8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PD,
Requires<[HasAVX]>;
class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
Requires<[HasAVX]>;
// AVX2 Instruction Templates:
// Instructions introduced in AVX2 (no SSE equivalent forms)
//
-// AVX28I - AVX2 instructions with T8PD prefix.
-// AVX2AIi8 - AVX2 instructions with TAPD prefix and ImmT = Imm8.
+// AVX28I - AVX2 instructions with T8, PD prefix.
+// AVX2AIi8 - AVX2 instructions with TA, PD prefix and ImmT = Imm8.
class AVX28I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PD,
Requires<[HasAVX2]>;
class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
Requires<[HasAVX2]>;
// AVX-512 Instruction Templates:
// Instructions introduced in AVX-512 (no SSE equivalent forms)
//
-// AVX5128I - AVX-512 instructions with T8PD prefix.
-// AVX512AIi8 - AVX-512 instructions with TAPD prefix and ImmT = Imm8.
+// AVX5128I - AVX-512 instructions with T8, PD prefix.
+// AVX512AIi8 - AVX-512 instructions with TA, PD prefix and ImmT = Imm8.
// AVX512PDI - AVX-512 instructions with PD, double packed.
// AVX512PSI - AVX-512 instructions with PS, single packed.
// AVX512XS8I - AVX-512 instructions with T8 and XS prefixes.
@@ -802,39 +779,39 @@ class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class AVX5128I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PD,
Requires<[HasAVX512]>;
-class AVX5128IBase : T8PD {
+class AVX5128IBase : T8, PD {
Domain ExeDomain = SSEPackedInt;
}
class AVX512XS8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8XS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, XS,
Requires<[HasAVX512]>;
class AVX512XSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XS,
+ : I<o, F, outs, ins, asm, pattern>, TB, XS,
Requires<[HasAVX512]>;
class AVX512XDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, XD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, TB, XD,
Requires<[HasAVX512]>;
class AVX512BI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, TB, PD,
Requires<[HasAVX512]>;
-class AVX512BIBase : PD {
+class AVX512BIBase : TB, PD {
Domain ExeDomain = SSEPackedInt;
}
class AVX512BIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, PD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TB, PD,
Requires<[HasAVX512]>;
class AVX512AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
Requires<[HasAVX512]>;
-class AVX512AIi8Base : TAPD {
+class AVX512AIi8Base : TA, PD {
ImmType ImmT = Imm8;
}
class AVX512Ii8<bits<8> o, Format F, dag outs, dag ins, string asm,
@@ -843,11 +820,11 @@ class AVX512Ii8<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX512]>;
class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, PD,
Requires<[HasAVX512]>;
class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, PS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB, PS,
Requires<[HasAVX512]>;
class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, Domain d>
@@ -857,7 +834,7 @@ class AVX512PI<bits<8> o, Format F, dag outs, dag ins, string asm,
: I<o, F, outs, ins, asm, pattern, d>, Requires<[HasAVX512]>;
class AVX512FMA3S<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : I<o, F, outs, ins, asm, pattern>, T8PD,
+ : I<o, F, outs, ins, asm, pattern>, T8, PD,
EVEX, VVVV, Requires<[HasAVX512]>;
class AVX512<bits<8> o, Format F, dag outs, dag ins, string asm,
@@ -870,45 +847,45 @@ class AVX512<bits<8> o, Format F, dag outs, dag ins, string asm,
// These use the same encoding as the SSE4.2 T8 and TA encodings.
class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8PD,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PD,
Requires<[NoAVX, HasAES]>;
class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
Requires<[NoAVX, HasAES]>;
// PCLMUL Instruction Templates
class PCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD;
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD;
// FMA3 Instruction Templates
class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : I<o, F, outs, ins, asm, pattern>, T8PD,
+ : I<o, F, outs, ins, asm, pattern>, T8, PD,
VEX, VVVV, FMASC, Requires<[HasFMA, NoFMA4, NoVLX]>;
class FMA3S<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : I<o, F, outs, ins, asm, pattern>, T8PD,
+ : I<o, F, outs, ins, asm, pattern>, T8, PD,
VEX, VVVV, FMASC, Requires<[HasFMA, NoFMA4, NoAVX512]>;
class FMA3S_Int<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : I<o, F, outs, ins, asm, pattern>, T8PD,
+ : I<o, F, outs, ins, asm, pattern>, T8, PD,
VEX, VVVV, FMASC, Requires<[HasFMA, NoAVX512]>;
// FMA4 Instruction Templates
class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : Ii8Reg<o, F, outs, ins, asm, pattern>, TAPD,
+ : Ii8Reg<o, F, outs, ins, asm, pattern>, TA, PD,
VEX, VVVV, FMASC, Requires<[HasFMA4, NoVLX]>;
class FMA4S<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : Ii8Reg<o, F, outs, ins, asm, pattern>, TAPD,
+ : Ii8Reg<o, F, outs, ins, asm, pattern>, TA, PD,
VEX, VVVV, FMASC, Requires<[HasFMA4, NoAVX512]>;
class FMA4S_Int<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : Ii8Reg<o, F, outs, ins, asm, pattern>, TAPD,
+ : Ii8Reg<o, F, outs, ins, asm, pattern>, TA, PD,
VEX, VVVV, FMASC, Requires<[HasFMA4]>;
// XOP 2, 3 and 4 Operand Instruction Template
@@ -931,7 +908,7 @@ class IXOPi8Reg<bits<8> o, Format F, dag outs, dag ins, string asm,
// XOP 5 operand instruction (VEX encoding!)
class IXOP5<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag>pattern>
- : Ii8Reg<o, F, outs, ins, asm, pattern, SSEPackedInt>, TAPD,
+ : Ii8Reg<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PD,
VEX, VVVV, Requires<[HasXOP]>;
// X86-64 Instruction templates...
@@ -970,14 +947,14 @@ class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, PS, Requires<[HasMMX]>;
+ : I<o, F, outs, ins, asm, pattern>, TB, PS, Requires<[HasMMX]>;
class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, PS, REX_W,
+ : I<o, F, outs, ins, asm, pattern>, TB, PS, REX_W,
Requires<[HasMMX,In64BitMode]>;
class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, PS, Requires<[HasMMX]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, TB, PS, Requires<[HasMMX]>;
/// ITy - This instruction base class takes the type info for the instruction.
/// Using this, it:
diff --git a/llvm/lib/Target/X86/X86InstrVMX.td b/llvm/lib/Target/X86/X86InstrVMX.td
index c3fba9c5728ca9..f2fc0dbaa3703a 100644
--- a/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/llvm/lib/Target/X86/X86InstrVMX.td
@@ -17,33 +17,33 @@
let SchedRW = [WriteSystem] in {
// 66 0F 38 80
def INVEPT32 : I<0x80, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invept\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ "invept\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
Requires<[Not64BitMode]>;
def INVEPT64 : I<0x80, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invept\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ "invept\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
Requires<[In64BitMode]>;
def INVEPT64_EVEX : I<0xF0, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invept\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
// 66 0F 38 81
def INVVPID32 : I<0x81, MRMSrcMem, (outs), (ins GR32:$src1, i128mem:$src2),
- "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
Requires<[Not64BitMode]>;
def INVVPID64 : I<0x81, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
- "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8PD,
+ "invvpid\t{$src2, $src1|$src1, $src2}", []>, T8, PD,
Requires<[In64BitMode]>;
def INVVPID64_EVEX : I<0xF1, MRMSrcMem, (outs), (ins GR64:$src1, i128mem:$src2),
"invvpid\t{$src2, $src1|$src1, $src2}", []>,
- EVEX, NoCD8, T_MAP4XS, Requires<[In64BitMode]>;
+ EVEX, NoCD8, T_MAP4, XS, Requires<[In64BitMode]>;
// 0F 01 C1
def VMCALL : I<0x01, MRM_C1, (outs), (ins), "vmcall", []>, TB;
def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
- "vmclear\t$vmcs", []>, PD;
+ "vmclear\t$vmcs", []>, TB, PD;
// OF 01 D4
-def VMFUNC : I<0x01, MRM_D4, (outs), (ins), "vmfunc", []>, PS;
+def VMFUNC : I<0x01, MRM_D4, (outs), (ins), "vmfunc", []>, TB, PS;
// 0F 01 C2
def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
@@ -51,35 +51,35 @@ def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
// 0F 01 C3
def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
- "vmptrld\t$vmcs", []>, PS;
+ "vmptrld\t$vmcs", []>, TB, PS;
def VMPTRSTm : I<0xC7, MRM7m, (outs), (ins i64mem:$vmcs),
- "vmptrst\t$vmcs", []>, PS;
+ "vmptrst\t$vmcs", []>, TB, PS;
def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
let mayStore = 1 in {
def VMREAD64mr : I<0x78, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
def VMREAD32mr : I<0x78, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
} // mayStore
def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
let mayLoad = 1 in {
def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
} // mayLoad
// 0F 01 C4
def VMXOFF : I<0x01, MRM_C4, (outs), (ins), "vmxoff", []>, TB;
def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon),
- "vmxon\t$vmxon", []>, XS;
+ "vmxon\t$vmxon", []>, TB, XS;
} // SchedRW
More information about the llvm-commits
mailing list