[llvm] 6e20df1 - [X86][NFC] Set default OpPrefix to PS for XOP/VEX/EVEX instructions
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Sat Dec 23 18:21:00 PST 2023
Author: Shengchen Kan
Date: 2023-12-24T10:20:40+08:00
New Revision: 6e20df1a3b0f7654b2821fe182c7ae9bd52672e6
URL: https://github.com/llvm/llvm-project/commit/6e20df1a3b0f7654b2821fe182c7ae9bd52672e6
DIFF: https://github.com/llvm/llvm-project/commit/6e20df1a3b0f7654b2821fe182c7ae9bd52672e6.diff
LOG: [X86][NFC] Set default OpPrefix to PS for XOP/VEX/EVEX instructions
It helps simplify the class definitions. Now, the only explicit usage of PS is
to check that prefix 0x66/0xf2/0xf3 cannot be used as a prefix, e.g. wbinvd.
See 82974e0114f02ffc07557e217d87f8dc4e100a26 for more details.
Added:
Modified:
llvm/lib/Target/X86/X86InstrAMX.td
llvm/lib/Target/X86/X86InstrAVX512.td
llvm/lib/Target/X86/X86InstrArithmetic.td
llvm/lib/Target/X86/X86InstrFPStack.td
llvm/lib/Target/X86/X86InstrFormats.td
llvm/lib/Target/X86/X86InstrMMX.td
llvm/lib/Target/X86/X86InstrMisc.td
llvm/lib/Target/X86/X86InstrRAOINT.td
llvm/lib/Target/X86/X86InstrSGX.td
llvm/lib/Target/X86/X86InstrSSE.td
llvm/lib/Target/X86/X86InstrSystem.td
llvm/lib/Target/X86/X86InstrTSX.td
llvm/lib/Target/X86/X86InstrUtils.td
llvm/lib/Target/X86/X86InstrVMX.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrAMX.td b/llvm/lib/Target/X86/X86InstrAMX.td
index a4292b99511bbd..7f3e193d9a1b9c 100644
--- a/llvm/lib/Target/X86/X86InstrAMX.td
+++ b/llvm/lib/Target/X86/X86InstrAMX.td
@@ -20,7 +20,7 @@ let Predicates = [HasAMXTILE, In64BitMode] in {
Defs = [TMM0,TMM1,TMM2,TMM3,TMM4,TMM5,TMM6,TMM7] in
def LDTILECFG : I <0x49, MRM0m, (outs), (ins opaquemem:$src),
"ldtilecfg\t$src",
- [(int_x86_ldtilecfg addr:$src)]>, VEX, T8, PS;
+ [(int_x86_ldtilecfg addr:$src)]>, VEX, T8;
let hasSideEffects = 1 in
def STTILECFG : I <0x49, MRM0m, (outs), (ins opaquemem:$src),
"sttilecfg\t$src",
@@ -37,7 +37,7 @@ let Predicates = [HasAMXTILE, In64BitMode] in {
VEX, T8, PD;
let Defs = [TMM0,TMM1,TMM2,TMM3,TMM4,TMM5,TMM6,TMM7] in
def TILERELEASE : I<0x49, MRM_C0, (outs), (ins),
- "tilerelease", [(int_x86_tilerelease)]>, VEX, T8, PS;
+ "tilerelease", [(int_x86_tilerelease)]>, VEX, T8;
let mayStore = 1 in
def TILESTORED : I<0x4b, MRMDestMemFSIB, (outs),
(ins sibmem:$dst, TILE:$src),
@@ -103,7 +103,7 @@ let Predicates = [HasAMXINT8, In64BitMode] in {
def TDPBUUD : I<0x5e, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tdpbuud\t{$src3, $src2, $dst|$dst, $src2, $src3}", []>,
- VEX, VVVV, T8, PS;
+ VEX, VVVV, T8;
}
// Pseduo instruction for RA.
@@ -226,7 +226,7 @@ let Predicates = [HasAMXCOMPLEX, In64BitMode] in {
def TCMMRLFP16PS : I<0x6c, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tcmmrlfp16ps\t{$src3, $src2, $src1|$src1, $src2, $src3}",
- []>, VEX, VVVV, WIG, T8, PS;
+ []>, VEX, VVVV, WIG, T8;
} // Constraints = "$src1 = $dst"
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index e3a4aee3aceb77..7c3c1d5fe42b3c 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -2634,11 +2634,11 @@ let Predicates = [HasDQI, HasEGPR, In64BitMode] in
let Predicates = [HasAVX512, NoEGPR] in
defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
- VEX, TB, PS;
+ VEX, TB;
let Predicates = [HasAVX512, HasEGPR, In64BitMode] in
defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem, "_EVEX">,
avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32, "_EVEX">,
- EVEX, TB, PS;
+ EVEX, TB;
let Predicates = [HasBWI, NoEGPR] in {
defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1,i32mem>,
@@ -2646,7 +2646,7 @@ let Predicates = [HasBWI, NoEGPR] in {
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
VEX, TB, XD;
defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem>,
- VEX, TB, PS, REX_W;
+ VEX, TB, REX_W;
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
VEX, TB, XD, REX_W;
}
@@ -2656,7 +2656,7 @@ let Predicates = [HasBWI, HasEGPR, In64BitMode] in {
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32, "_EVEX">,
EVEX, TB, XD;
defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64mem, "_EVEX">,
- EVEX, TB, PS, REX_W;
+ EVEX, TB, REX_W;
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64, "_EVEX">,
EVEX, TB, XD, REX_W;
}
@@ -2771,11 +2771,11 @@ multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
sched, HasDQI>, VEX, TB, PD;
defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
- sched, HasAVX512>, VEX, TB, PS;
+ sched, HasAVX512>, VEX, TB;
defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
sched, HasBWI>, VEX, TB, PD, REX_W;
defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
- sched, HasBWI>, VEX, TB, PS, REX_W;
+ sched, HasBWI>, VEX, TB, REX_W;
}
// TODO - do we need a X86SchedWriteWidths::KMASK type?
@@ -2814,11 +2814,11 @@ multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
sched, HasDQI, IsCommutable>, VEX, VVVV, VEX_L, TB, PD;
defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
- sched, prdW, IsCommutable>, VEX, VVVV, VEX_L, TB, PS;
+ sched, prdW, IsCommutable>, VEX, VVVV, VEX_L, TB;
defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
sched, HasBWI, IsCommutable>, VEX, VVVV, VEX_L, REX_W, TB, PD;
defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
- sched, HasBWI, IsCommutable>, VEX, VVVV, VEX_L, REX_W, TB, PS;
+ sched, HasBWI, IsCommutable>, VEX, VVVV, VEX_L, REX_W, TB;
}
// TODO - do we need a X86SchedWriteWidths::KMASK type?
@@ -2877,8 +2877,8 @@ multiclass avx512_mask_unpck<string Suffix, X86KVectorVTInfo Dst,
}
defm KUNPCKBW : avx512_mask_unpck<"bw", v16i1_info, v8i1_info, WriteShuffle, HasAVX512>, TB, PD;
-defm KUNPCKWD : avx512_mask_unpck<"wd", v32i1_info, v16i1_info, WriteShuffle, HasBWI>, TB, PS;
-defm KUNPCKDQ : avx512_mask_unpck<"dq", v64i1_info, v32i1_info, WriteShuffle, HasBWI>, TB, PS, REX_W;
+defm KUNPCKWD : avx512_mask_unpck<"wd", v32i1_info, v16i1_info, WriteShuffle, HasBWI>, TB;
+defm KUNPCKDQ : avx512_mask_unpck<"dq", v64i1_info, v32i1_info, WriteShuffle, HasBWI>, TB, REX_W;
// Mask bit testing
multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
@@ -2897,9 +2897,9 @@ multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
defm B : avx512_mask_testop<opc, OpcodeStr#"b", VK8, OpNode, sched, HasDQI>,
VEX, TB, PD;
defm W : avx512_mask_testop<opc, OpcodeStr#"w", VK16, OpNode, sched, prdW>,
- VEX, TB, PS;
+ VEX, TB;
defm Q : avx512_mask_testop<opc, OpcodeStr#"q", VK64, OpNode, sched, HasBWI>,
- VEX, TB, PS, REX_W;
+ VEX, TB, REX_W;
defm D : avx512_mask_testop<opc, OpcodeStr#"d", VK32, OpNode, sched, HasBWI>,
VEX, TB, PD, REX_W;
}
@@ -3371,7 +3371,7 @@ defm VMOVAPS : avx512_alignedload_vl<0x28, "vmovaps", avx512vl_f32_info,
HasAVX512, SchedWriteFMoveLS, "VMOVAPS">,
avx512_alignedstore_vl<0x29, "vmovaps", avx512vl_f32_info,
HasAVX512, SchedWriteFMoveLS, "VMOVAPS">,
- TB, PS, EVEX_CD8<32, CD8VF>;
+ TB, EVEX_CD8<32, CD8VF>;
defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
HasAVX512, SchedWriteFMoveLS, "VMOVAPD">,
@@ -3383,7 +3383,7 @@ defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512,
SchedWriteFMoveLS, "VMOVUPS", 0, null_frag>,
avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512,
SchedWriteFMoveLS, "VMOVUPS">,
- TB, PS, EVEX_CD8<32, CD8VF>;
+ TB, EVEX_CD8<32, CD8VF>;
defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512,
SchedWriteFMoveLS, "VMOVUPD", 0, null_frag>,
@@ -4589,7 +4589,7 @@ defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", avx512vl_i64_info,
defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", avx512vl_f64_info,
SchedWriteFMoveLSNT>, TB, PD, REX_W;
defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", avx512vl_f32_info,
- SchedWriteFMoveLSNT>, TB, PS;
+ SchedWriteFMoveLSNT>, TB;
let Predicates = [HasAVX512], AddedComplexity = 400 in {
def : Pat<(alignednontemporalstore (v16i32 VR512:$src), addr:$dst),
@@ -5607,7 +5607,7 @@ multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDPatternOperator Op
bit IsPD128Commutable = IsCommutable> {
let Predicates = [prd] in {
defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v16f32_info,
- sched.PS.ZMM, IsCommutable>, EVEX_V512, TB, PS,
+ sched.PS.ZMM, IsCommutable>, EVEX_V512, TB,
EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v8f64_info,
sched.PD.ZMM, IsCommutable>, EVEX_V512, TB, PD, REX_W,
@@ -5617,10 +5617,10 @@ multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDPatternOperator Op
// Define only if AVX512VL feature is present.
let Predicates = [prd, HasVLX] in {
defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v4f32x_info,
- sched.PS.XMM, IsCommutable>, EVEX_V128, TB, PS,
+ sched.PS.XMM, IsCommutable>, EVEX_V128, TB,
EVEX_CD8<32, CD8VF>;
defm PSZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v8f32x_info,
- sched.PS.YMM, IsCommutable>, EVEX_V256, TB, PS,
+ sched.PS.YMM, IsCommutable>, EVEX_V256, TB,
EVEX_CD8<32, CD8VF>;
defm PDZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v2f64x_info,
sched.PD.XMM, IsPD128Commutable,
@@ -5637,15 +5637,15 @@ multiclass avx512_fp_binop_ph<bits<8> opc, string OpcodeStr, SDPatternOperator O
X86SchedWriteSizes sched, bit IsCommutable = 0> {
let Predicates = [HasFP16] in {
defm PHZ : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v32f16_info,
- sched.PH.ZMM, IsCommutable>, EVEX_V512, T_MAP5, PS,
+ sched.PH.ZMM, IsCommutable>, EVEX_V512, T_MAP5,
EVEX_CD8<16, CD8VF>;
}
let Predicates = [HasVLX, HasFP16] in {
defm PHZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v8f16x_info,
- sched.PH.XMM, IsCommutable>, EVEX_V128, T_MAP5, PS,
+ sched.PH.XMM, IsCommutable>, EVEX_V128, T_MAP5,
EVEX_CD8<16, CD8VF>;
defm PHZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, MaskOpNode, v16f16x_info,
- sched.PH.YMM, IsCommutable>, EVEX_V256, T_MAP5, PS,
+ sched.PH.YMM, IsCommutable>, EVEX_V256, T_MAP5,
EVEX_CD8<16, CD8VF>;
}
}
@@ -5656,11 +5656,11 @@ multiclass avx512_fp_binop_p_round<bits<8> opc, string OpcodeStr, SDNode OpNodeR
let Predicates = [HasFP16] in {
defm PHZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.PH.ZMM,
v32f16_info>,
- EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, EVEX_CD8<16, CD8VF>;
}
defm PSZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.PS.ZMM,
v16f32_info>,
- EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_round_packed<opc, OpcodeStr, OpNodeRnd, sched.PD.ZMM,
v8f64_info>,
EVEX_V512, TB, PD, REX_W,EVEX_CD8<64, CD8VF>;
@@ -5672,11 +5672,11 @@ multiclass avx512_fp_binop_p_sae<bits<8> opc, string OpcodeStr, SDNode OpNodeRnd
let Predicates = [HasFP16] in {
defm PHZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, sched.PH.ZMM,
v32f16_info>,
- EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, EVEX_CD8<16, CD8VF>;
}
defm PSZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, sched.PS.ZMM,
v16f32_info>,
- EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_fp_sae_packed<opc, OpcodeStr, OpNodeRnd, sched.PD.ZMM,
v8f64_info>,
EVEX_V512, TB, PD, REX_W,EVEX_CD8<64, CD8VF>;
@@ -6500,11 +6500,11 @@ multiclass avx512_mov_hilo_packed<bits<8> opc, string OpcodeStr,
// No patterns for MOVLPS/MOVHPS as the Movlhps node should only be created in
// SSE1. And MOVLPS pattern is even more complex.
defm VMOVHPSZ128 : avx512_mov_hilo_packed<0x16, "vmovhps", null_frag,
- v4f32x_info>, EVEX_CD8<32, CD8VT2>, TB, PS;
+ v4f32x_info>, EVEX_CD8<32, CD8VT2>, TB;
defm VMOVHPDZ128 : avx512_mov_hilo_packed<0x16, "vmovhpd", X86Unpckl,
v2f64x_info>, EVEX_CD8<64, CD8VT1>, TB, PD, REX_W;
defm VMOVLPSZ128 : avx512_mov_hilo_packed<0x12, "vmovlps", null_frag,
- v4f32x_info>, EVEX_CD8<32, CD8VT2>, TB, PS;
+ v4f32x_info>, EVEX_CD8<32, CD8VT2>, TB;
defm VMOVLPDZ128 : avx512_mov_hilo_packed<0x12, "vmovlpd", X86Movsd,
v2f64x_info>, EVEX_CD8<64, CD8VT1>, TB, PD, REX_W;
@@ -7731,10 +7731,10 @@ defm VCVTSH2SD : avx512_cvt_fp_scalar_extend<0x5A, "vcvtsh2sd", X86fpexts,
f64x_info, HasFP16>, T_MAP5, XS;
defm VCVTSS2SH : avx512_cvt_fp_scalar_trunc<0x1D, "vcvtss2sh", X86frounds,
X86froundsRnd, WriteCvtSD2SS, f32x_info,
- f16x_info, HasFP16>, T_MAP5, PS;
+ f16x_info, HasFP16>, T_MAP5;
defm VCVTSH2SS : avx512_cvt_fp_scalar_extend<0x13, "vcvtsh2ss", X86fpexts,
X86fpextsSAE, WriteCvtSS2SD, f16x_info,
- f32x_info, HasFP16>, T_MAP6, PS;
+ f32x_info, HasFP16>, T_MAP6;
def : Pat<(f64 (any_fpextend FR32X:$src)),
(VCVTSS2SDZrr (f64 (IMPLICIT_DEF)), FR32X:$src)>,
@@ -7999,7 +7999,7 @@ defm VCVTPD2PS : avx512_cvt_trunc<0x5A, "vcvtpd2ps",
REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
defm VCVTPS2PD : avx512_cvt_extend<0x5A, "vcvtps2pd",
avx512vl_f64_info, avx512vl_f32_info, SchedWriteCvtPS2PD>,
- TB, PS, EVEX_CD8<32, CD8VH>;
+ TB, EVEX_CD8<32, CD8VH>;
// Extend Half to Double
multiclass avx512_cvtph2pd<bits<8> opc, string OpcodeStr,
@@ -8115,7 +8115,7 @@ defm VCVTPH2PSX : avx512_cvt_extend<0x13, "vcvtph2psx", avx512vl_f32_info,
defm VCVTPD2PH : avx512_cvtpd2ph<0x5A, "vcvtpd2ph", SchedWriteCvtPD2PS>,
REX_W, T_MAP5, PD, EVEX_CD8<64, CD8VF>;
defm VCVTPH2PD : avx512_cvtph2pd<0x5A, "vcvtph2pd", SchedWriteCvtPS2PD>,
- T_MAP5, PS, EVEX_CD8<16, CD8VQ>;
+ T_MAP5, EVEX_CD8<16, CD8VQ>;
let Predicates = [HasFP16, HasVLX] in {
// Special patterns to allow use of X86vmfpround for masking. Instruction
@@ -8600,7 +8600,7 @@ defm VCVTDQ2PD : avx512_cvtdq2pd<0xE6, "vcvtdq2pd", any_sint_to_fp, sint_to_fp,
defm VCVTDQ2PS : avx512_cvtdq2ps<0x5B, "vcvtdq2ps", any_sint_to_fp, sint_to_fp,
X86VSintToFpRnd, SchedWriteCvtDQ2PS>,
- TB, PS, EVEX_CD8<32, CD8VF>;
+ TB, EVEX_CD8<32, CD8VF>;
defm VCVTTPS2DQ : avx512_cvttps2dq<0x5B, "vcvttps2dq", X86any_cvttp2si,
X86cvttp2si, X86cvttp2siSAE,
@@ -8613,12 +8613,12 @@ defm VCVTTPD2DQ : avx512_cvttpd2dq<0xE6, "vcvttpd2dq", X86any_cvttp2si,
defm VCVTTPS2UDQ : avx512_cvttps2dq<0x78, "vcvttps2udq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
- SchedWriteCvtPS2DQ>, TB, PS, EVEX_CD8<32, CD8VF>;
+ SchedWriteCvtPS2DQ>, TB, EVEX_CD8<32, CD8VF>;
defm VCVTTPD2UDQ : avx512_cvttpd2dq<0x78, "vcvttpd2udq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
SchedWriteCvtPD2DQ>,
- TB, PS, REX_W, EVEX_CD8<64, CD8VF>;
+ TB, REX_W, EVEX_CD8<64, CD8VF>;
defm VCVTUDQ2PD : avx512_cvtdq2pd<0x7A, "vcvtudq2pd", any_uint_to_fp,
uint_to_fp, X86any_VUintToFP, X86VUintToFP,
@@ -8638,11 +8638,11 @@ defm VCVTPD2DQ : avx512_cvtpd2dq<0xE6, "vcvtpd2dq", X86cvtp2Int, X86cvtp2Int,
defm VCVTPS2UDQ : avx512_cvtps2dq<0x79, "vcvtps2udq", X86cvtp2UInt, X86cvtp2UInt,
X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>,
- TB, PS, EVEX_CD8<32, CD8VF>;
+ TB, EVEX_CD8<32, CD8VF>;
defm VCVTPD2UDQ : avx512_cvtpd2dq<0x79, "vcvtpd2udq", X86cvtp2UInt, X86cvtp2UInt,
X86cvtp2UIntRnd, SchedWriteCvtPD2DQ>, REX_W,
- TB, PS, EVEX_CD8<64, CD8VF>;
+ TB, EVEX_CD8<64, CD8VF>;
defm VCVTPD2QQ : avx512_cvtpd2qq<0x7B, "vcvtpd2qq", X86cvtp2Int, X86cvtp2Int,
X86cvtp2IntRnd, SchedWriteCvtPD2DQ>, REX_W,
@@ -8692,7 +8692,7 @@ defm VCVTDQ2PH : avx512_cvtqq2ps_dq2ph<0x5B, "vcvtdq2ph", any_sint_to_fp, sint_t
X86any_VSintToFP, X86VMSintToFP,
X86VSintToFpRnd, avx512vl_f16_info, avx512vl_i32_info,
SchedWriteCvtDQ2PS, HasFP16>,
- T_MAP5, PS, EVEX_CD8<32, CD8VF>;
+ T_MAP5, EVEX_CD8<32, CD8VF>;
defm VCVTUDQ2PH : avx512_cvtqq2ps_dq2ph<0x7A, "vcvtudq2ph", any_uint_to_fp, uint_to_fp,
X86any_VUintToFP, X86VMUintToFP,
@@ -8703,7 +8703,7 @@ defm VCVTUDQ2PH : avx512_cvtqq2ps_dq2ph<0x7A, "vcvtudq2ph", any_uint_to_fp, uint
defm VCVTQQ2PS : avx512_cvtqq2ps_dq2ph<0x5B, "vcvtqq2ps", any_sint_to_fp, sint_to_fp,
X86any_VSintToFP, X86VMSintToFP,
X86VSintToFpRnd, avx512vl_f32_info, avx512vl_i64_info,
- SchedWriteCvtDQ2PS>, REX_W, TB, PS,
+ SchedWriteCvtDQ2PS>, REX_W, TB,
EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PS : avx512_cvtqq2ps_dq2ph<0x7A, "vcvtuqq2ps", any_uint_to_fp, uint_to_fp,
@@ -9068,27 +9068,27 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in {
let Defs = [EFLAGS], Predicates = [HasAVX512] in {
defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86any_fcmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
+ "ucomiss", SSEPackedSingle>, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86any_fcmp, f64, f64mem, loadf64,
"ucomisd", SSEPackedDouble>, TB, PD, EVEX,
VEX_LIG, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCOMISSZ : sse12_ord_cmp<0x2F, FR32X, X86strict_fcmps, f32, f32mem, loadf32,
- "comiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
+ "comiss", SSEPackedSingle>, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VCOMISDZ : sse12_ord_cmp<0x2F, FR64X, X86strict_fcmps, f64, f64mem, loadf64,
"comisd", SSEPackedDouble>, TB, PD, EVEX,
VEX_LIG, REX_W, EVEX_CD8<64, CD8VT1>;
let isCodeGenOnly = 1 in {
defm VUCOMISSZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v4f32, ssmem,
- sse_load_f32, "ucomiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
+ sse_load_f32, "ucomiss", SSEPackedSingle>, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VUCOMISDZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v2f64, sdmem,
sse_load_f64, "ucomisd", SSEPackedDouble>, TB, PD, EVEX,
VEX_LIG, REX_W, EVEX_CD8<64, CD8VT1>;
defm VCOMISSZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v4f32, ssmem,
- sse_load_f32, "comiss", SSEPackedSingle>, TB, PS, EVEX, VEX_LIG,
+ sse_load_f32, "comiss", SSEPackedSingle>, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VCOMISDZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v2f64, sdmem,
sse_load_f64, "comisd", SSEPackedDouble>, TB, PD, EVEX,
@@ -9104,19 +9104,19 @@ let Defs = [EFLAGS], Predicates = [HasFP16] in {
SSEPackedSingle>, AVX512PSIi8Base, T_MAP5,
EVEX_CD8<16, CD8VT1>;
defm VUCOMISHZ : sse12_ord_cmp<0x2E, FR16X, X86any_fcmp, f16, f16mem, loadf16,
- "ucomish", SSEPackedSingle>, T_MAP5, PS, EVEX,
+ "ucomish", SSEPackedSingle>, T_MAP5, EVEX,
VEX_LIG, EVEX_CD8<16, CD8VT1>;
defm VCOMISHZ : sse12_ord_cmp<0x2F, FR16X, X86strict_fcmps, f16, f16mem, loadf16,
- "comish", SSEPackedSingle>, T_MAP5, PS, EVEX,
+ "comish", SSEPackedSingle>, T_MAP5, EVEX,
VEX_LIG, EVEX_CD8<16, CD8VT1>;
let isCodeGenOnly = 1 in {
defm VUCOMISHZ : sse12_ord_cmp_int<0x2E, VR128X, X86ucomi, v8f16, shmem,
sse_load_f16, "ucomish", SSEPackedSingle>,
- T_MAP5, PS, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
defm VCOMISHZ : sse12_ord_cmp_int<0x2F, VR128X, X86comi, v8f16, shmem,
sse_load_f16, "comish", SSEPackedSingle>,
- T_MAP5, PS, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
+ T_MAP5, EVEX, VEX_LIG, EVEX_CD8<16, CD8VT1>;
}
}
@@ -9401,18 +9401,18 @@ multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
let Predicates = [HasFP16] in
defm PHZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.ZMM, v32f16_info>,
- EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, EVEX_CD8<16, CD8VF>;
let Predicates = [HasFP16, HasVLX] in {
defm PHZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.XMM, v8f16x_info>,
- EVEX_V128, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V128, T_MAP5, EVEX_CD8<16, CD8VF>;
defm PHZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.YMM, v16f16x_info>,
- EVEX_V256, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V256, T_MAP5, EVEX_CD8<16, CD8VF>;
}
defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.ZMM, v16f32_info>,
- EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
sched.PD.ZMM, v8f64_info>,
EVEX_V512, REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
@@ -9420,10 +9420,10 @@ multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX] in {
defm PSZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.XMM, v4f32x_info>,
- EVEX_V128, TB, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V128, TB, EVEX_CD8<32, CD8VF>;
defm PSZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.YMM, v8f32x_info>,
- EVEX_V256, TB, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V256, TB, EVEX_CD8<32, CD8VF>;
defm PDZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
sched.PD.XMM, v2f64x_info>,
EVEX_V128, REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
@@ -9439,10 +9439,10 @@ multiclass avx512_sqrt_packed_all_round<bits<8> opc, string OpcodeStr,
let Predicates = [HasFP16] in
defm PHZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "ph"),
sched.PH.ZMM, v32f16_info>,
- EVEX_V512, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ EVEX_V512, T_MAP5, EVEX_CD8<16, CD8VF>;
defm PSZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "ps"),
sched.PS.ZMM, v16f32_info>,
- EVEX_V512, TB, PS, EVEX_CD8<32, CD8VF>;
+ EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
defm PDZ : avx512_sqrt_packed_round<opc, !strconcat(OpcodeStr, "pd"),
sched.PD.ZMM, v8f64_info>,
EVEX_V512, REX_W, TB, PD, EVEX_CD8<64, CD8VF>;
@@ -10663,7 +10663,7 @@ multiclass avx512_fp_packed_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_3Op_rm_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo DestInfo,
X86VectorVTInfo SrcInfo>{
- let ExeDomain = DestInfo.ExeDomain in {
+ let ExeDomain = DestInfo.ExeDomain, ImmT = Imm8 in {
defm rri : AVX512_maskable<opc, MRMSrcReg, DestInfo, (outs DestInfo.RC:$dst),
(ins SrcInfo.RC:$src1, SrcInfo.RC:$src2, u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -10689,7 +10689,7 @@ multiclass avx512_3Op_imm8<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo _>:
avx512_3Op_rm_imm8<opc, OpcodeStr, OpNode, sched, _, _>{
- let ExeDomain = _.ExeDomain in
+ let ExeDomain = _.ExeDomain, ImmT = Imm8 in
defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, ${src2}"#_.BroadcastStr#", $src1",
@@ -11501,11 +11501,11 @@ multiclass avx512_shufp<string OpcodeStr, AVX512VLVectorVTInfo VTInfo_FP>{
defm NAME: avx512_common_3Op_imm8<OpcodeStr, VTInfo_FP, 0xC6, X86Shufp,
SchedWriteFShuffle>,
EVEX_CD8<VTInfo_FP.info512.EltSize, CD8VF>,
- AVX512AIi8Base, EVEX, VVVV;
+ TA, EVEX, VVVV;
}
-defm VSHUFPS: avx512_shufp<"vshufps", avx512vl_f32_info>, TB, PS;
-defm VSHUFPD: avx512_shufp<"vshufpd", avx512vl_f64_info>, TB, REX_W;
+defm VSHUFPS: avx512_shufp<"vshufps", avx512vl_f32_info>, TB;
+defm VSHUFPD: avx512_shufp<"vshufpd", avx512vl_f64_info>, TB, PD, REX_W;
//===----------------------------------------------------------------------===//
// AVX-512 - Byte shift Left/Right
@@ -12920,7 +12920,7 @@ multiclass avx512_cvttph2w<bits<8> opc, string OpcodeStr, SDPatternOperator OpNo
defm VCVTPH2UW : avx512_cvtph2w<0x7D, "vcvtph2uw", X86cvtp2UInt, X86cvtp2UInt,
X86cvtp2UIntRnd, avx512vl_i16_info,
avx512vl_f16_info, SchedWriteCvtPD2DQ>,
- T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ T_MAP5, EVEX_CD8<16, CD8VF>;
defm VCVTUW2PH : avx512_cvtph2w<0x7D, "vcvtuw2ph", any_uint_to_fp, uint_to_fp,
X86VUintToFpRnd, avx512vl_f16_info,
avx512vl_i16_info, SchedWriteCvtPD2DQ>,
@@ -12932,7 +12932,7 @@ defm VCVTTPH2W : avx512_cvttph2w<0x7C, "vcvttph2w", X86any_cvttp2si,
defm VCVTTPH2UW : avx512_cvttph2w<0x7C, "vcvttph2uw", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
avx512vl_i16_info, avx512vl_f16_info,
- SchedWriteCvtPD2DQ>, T_MAP5, PS, EVEX_CD8<16, CD8VF>;
+ SchedWriteCvtPD2DQ>, T_MAP5, EVEX_CD8<16, CD8VF>;
defm VCVTPH2W : avx512_cvtph2w<0x7D, "vcvtph2w", X86cvtp2Int, X86cvtp2Int,
X86cvtp2IntRnd, avx512vl_i16_info,
avx512vl_f16_info, SchedWriteCvtPD2DQ>,
@@ -12983,7 +12983,7 @@ defm VCVTPH2DQ : avx512_cvtph2dq<0x5B, "vcvtph2dq", X86cvtp2Int, X86cvtp2Int,
X86cvtp2IntRnd, SchedWriteCvtPS2DQ>, T_MAP5, PD,
EVEX_CD8<16, CD8VH>;
defm VCVTPH2UDQ : avx512_cvtph2dq<0x79, "vcvtph2udq", X86cvtp2UInt, X86cvtp2UInt,
- X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, T_MAP5, PS,
+ X86cvtp2UIntRnd, SchedWriteCvtPS2DQ>, T_MAP5,
EVEX_CD8<16, CD8VH>;
defm VCVTTPH2DQ : avx512_cvttph2dq<0x5B, "vcvttph2dq", X86any_cvttp2si,
@@ -12993,7 +12993,7 @@ defm VCVTTPH2DQ : avx512_cvttph2dq<0x5B, "vcvttph2dq", X86any_cvttp2si,
defm VCVTTPH2UDQ : avx512_cvttph2dq<0x78, "vcvttph2udq", X86any_cvttp2ui,
X86cvttp2ui, X86cvttp2uiSAE,
- SchedWriteCvtPS2DQ>, T_MAP5, PS,
+ SchedWriteCvtPS2DQ>, T_MAP5,
EVEX_CD8<16, CD8VH>;
// Convert Half to Signed/Unsigned Quardword
@@ -13154,7 +13154,7 @@ multiclass avx512_cvtqq2ph<bits<8> opc, string OpcodeStr, SDPatternOperator OpNo
}
defm VCVTQQ2PH : avx512_cvtqq2ph<0x5B, "vcvtqq2ph", any_sint_to_fp, sint_to_fp,
- X86VSintToFpRnd, SchedWriteCvtDQ2PS>, REX_W, T_MAP5, PS,
+ X86VSintToFpRnd, SchedWriteCvtDQ2PS>, REX_W, T_MAP5,
EVEX_CD8<64, CD8VF>;
defm VCVTUQQ2PH : avx512_cvtqq2ph<0x7A, "vcvtuqq2ph", any_uint_to_fp, uint_to_fp,
diff --git a/llvm/lib/Target/X86/X86InstrArithmetic.td b/llvm/lib/Target/X86/X86InstrArithmetic.td
index 4fb05231010d8b..abd0d87354f8e6 100644
--- a/llvm/lib/Target/X86/X86InstrArithmetic.td
+++ b/llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -1117,8 +1117,8 @@ let Predicates = [HasBMI, HasEGPR, In64BitMode] in {
// Complexity is reduced to give and with immediate a chance to match first.
let Defs = [EFLAGS], AddedComplexity = -6 in {
- defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32, WriteALU>, T8, PS;
- defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64, WriteALU>, T8, PS, REX_W;
+ defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32, WriteALU>, T8;
+ defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64, WriteALU>, T8, REX_W;
}
let Predicates = [HasBMI], AddedComplexity = -6 in {
diff --git a/llvm/lib/Target/X86/X86InstrFPStack.td b/llvm/lib/Target/X86/X86InstrFPStack.td
index dd63e921b8acdf..6a9a74ce15f2a7 100644
--- a/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -666,20 +666,20 @@ def FCOMPP : I<0xDE, MRM_D9, (outs), (ins), "fcompp", []>;
let Uses = [FPSW, FPCW] in {
def FXSAVE : I<0xAE, MRM0m, (outs), (ins opaquemem:$dst),
- "fxsave\t$dst", [(int_x86_fxsave addr:$dst)]>, TB, PS,
+ "fxsave\t$dst", [(int_x86_fxsave addr:$dst)]>, TB,
Requires<[HasFXSR]>;
def FXSAVE64 : RI<0xAE, MRM0m, (outs), (ins opaquemem:$dst),
"fxsave64\t$dst", [(int_x86_fxsave64 addr:$dst)]>,
- TB, PS, Requires<[HasFXSR, In64BitMode]>;
+ TB, Requires<[HasFXSR, In64BitMode]>;
} // Uses = [FPSW, FPCW]
let Defs = [FPSW, FPCW] in {
def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaquemem:$src),
"fxrstor\t$src", [(int_x86_fxrstor addr:$src)]>,
- TB, PS, Requires<[HasFXSR]>;
+ TB, Requires<[HasFXSR]>;
def FXRSTOR64 : RI<0xAE, MRM1m, (outs), (ins opaquemem:$src),
"fxrstor64\t$src", [(int_x86_fxrstor64 addr:$src)]>,
- TB, PS, Requires<[HasFXSR, In64BitMode]>;
+ TB, Requires<[HasFXSR, In64BitMode]>;
} // Defs = [FPSW, FPCW]
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrFormats.td b/llvm/lib/Target/X86/X86InstrFormats.td
index df05a5788a50ac..f94072a0c7076a 100644
--- a/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/llvm/lib/Target/X86/X86InstrFormats.td
@@ -234,7 +234,9 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
// based on address size of the mode?
bits<2> AdSizeBits = AdSize.Value;
- Prefix OpPrefix = NoPrfx; // Which prefix byte does this inst have?
+ Encoding OpEnc = EncNormal; // Encoding used by this instruction
+ // Which prefix byte does this inst have?
+ Prefix OpPrefix = !if(!eq(OpEnc, EncNormal), NoPrfx, PS);
bits<3> OpPrefixBits = OpPrefix.Value;
Map OpMap = OB; // Which opcode map does this inst have?
bits<4> OpMapBits = OpMap.Value;
@@ -243,7 +245,6 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bit hasLockPrefix = 0; // Does this inst have a 0xF0 prefix?
Domain ExeDomain = d;
bit hasREPPrefix = 0; // Does this inst have a REP prefix?
- Encoding OpEnc = EncNormal; // Encoding used by this instruction
bits<2> OpEncBits = OpEnc.Value;
bit IgnoresW = 0; // Does this inst ignore REX_W field?
bit EVEX_W1_VEX_W0 = 0; // This EVEX inst with VEX.W==1 can become a VEX
diff --git a/llvm/lib/Target/X86/X86InstrMMX.td b/llvm/lib/Target/X86/X86InstrMMX.td
index 8d472ccd52df38..8d6bc8d0ee2cfc 100644
--- a/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/llvm/lib/Target/X86/X86InstrMMX.td
@@ -487,13 +487,13 @@ def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
// -- Conversion Instructions
defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
- WriteCvtPS2I, SSEPackedSingle>, TB, PS, SIMD_EXC;
+ WriteCvtPS2I, SSEPackedSingle>, TB, SIMD_EXC;
defm MMX_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
WriteCvtPD2I, SSEPackedDouble>, TB, PD, SIMD_EXC;
defm MMX_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
- WriteCvtPS2I, SSEPackedSingle>, TB, PS, SIMD_EXC;
+ WriteCvtPS2I, SSEPackedSingle>, TB, SIMD_EXC;
defm MMX_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
WriteCvtPD2I, SSEPackedDouble>, TB, PD, SIMD_EXC;
@@ -504,7 +504,7 @@ let Constraints = "$src1 = $dst" in {
defm MMX_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
int_x86_sse_cvtpi2ps,
i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- SSEPackedSingle>, TB, PS, SIMD_EXC;
+ SSEPackedSingle>, TB, SIMD_EXC;
}
// Extract / Insert
diff --git a/llvm/lib/Target/X86/X86InstrMisc.td b/llvm/lib/Target/X86/X86InstrMisc.td
index 779f27085eae0c..305bd74f7bd70a 100644
--- a/llvm/lib/Target/X86/X86InstrMisc.td
+++ b/llvm/lib/Target/X86/X86InstrMisc.td
@@ -165,10 +165,10 @@ def POPP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "popp\t$reg", []>,
REX_W, ExplicitREX2Prefix, Requires<[In64BitMode]>;
def POP2: I<0x8F, MRM0r, (outs GR64:$reg1, GR64:$reg2), (ins),
"pop2\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4, PS;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4;
def POP2P: I<0x8F, MRM0r, (outs GR64:$reg1, GR64:$reg2), (ins),
"pop2p\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4, PS, REX_W;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4, REX_W;
} // mayLoad, SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
@@ -186,10 +186,10 @@ def PUSHP64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "pushp\t$reg", []>,
REX_W, ExplicitREX2Prefix, Requires<[In64BitMode]>;
def PUSH2: I<0xFF, MRM6r, (outs), (ins GR64:$reg1, GR64:$reg2),
"push2\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4, PS;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4;
def PUSH2P: I<0xFF, MRM6r, (outs), (ins GR64:$reg1, GR64:$reg2),
"push2p\t{$reg2, $reg1|$reg1, $reg2}",
- []>, EVEX, VVVV, EVEX_B, T_MAP4, PS, REX_W;
+ []>, EVEX, VVVV, EVEX_B, T_MAP4, REX_W;
} // mayStore, SchedRW
let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
@@ -251,52 +251,52 @@ let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
- TB, PS, OpSize16, Sched<[WriteBSF]>;
+ TB, OpSize16, Sched<[WriteBSF]>;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
- TB, PS, OpSize16, Sched<[WriteBSFLd]>;
+ TB, OpSize16, Sched<[WriteBSFLd]>;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
- TB, PS, OpSize32, Sched<[WriteBSF]>;
+ TB, OpSize32, Sched<[WriteBSF]>;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
- TB, PS, OpSize32, Sched<[WriteBSFLd]>;
+ TB, OpSize32, Sched<[WriteBSFLd]>;
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
- TB, PS, Sched<[WriteBSF]>;
+ TB, Sched<[WriteBSF]>;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
- TB, PS, Sched<[WriteBSFLd]>;
+ TB, Sched<[WriteBSFLd]>;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
- TB, PS, OpSize16, Sched<[WriteBSR]>;
+ TB, OpSize16, Sched<[WriteBSR]>;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
- TB, PS, OpSize16, Sched<[WriteBSRLd]>;
+ TB, OpSize16, Sched<[WriteBSRLd]>;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
- TB, PS, OpSize32, Sched<[WriteBSR]>;
+ TB, OpSize32, Sched<[WriteBSR]>;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
- TB, PS, OpSize32, Sched<[WriteBSRLd]>;
+ TB, OpSize32, Sched<[WriteBSRLd]>;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
- TB, PS, Sched<[WriteBSR]>;
+ TB, Sched<[WriteBSR]>;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
- TB, PS, Sched<[WriteBSRLd]>;
+ TB, Sched<[WriteBSRLd]>;
} // Defs = [EFLAGS]
let SchedRW = [WriteMicrocoded] in {
@@ -1095,29 +1095,29 @@ let Predicates = [HasMOVBE] in {
def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
- OpSize16, T8, PS;
+ OpSize16, T8;
def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
- OpSize32, T8, PS;
+ OpSize32, T8;
def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
- T8, PS;
+ T8;
}
let SchedRW = [WriteStore] in {
def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(store (bswap GR16:$src), addr:$dst)]>,
- OpSize16, T8, PS;
+ OpSize16, T8;
def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(store (bswap GR32:$src), addr:$dst)]>,
- OpSize32, T8, PS;
+ OpSize32, T8;
def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(store (bswap GR64:$src), addr:$dst)]>,
- T8, PS;
+ T8;
}
}
@@ -1127,13 +1127,13 @@ let Predicates = [HasMOVBE] in {
let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
"rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
- OpSize16, TB, PS;
+ OpSize16, TB;
def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
"rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
- OpSize32, TB, PS;
+ OpSize32, TB;
def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
"rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
- TB, PS;
+ TB;
}
//===----------------------------------------------------------------------===//
@@ -1141,11 +1141,11 @@ let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
//
let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB, PS;
+ [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB;
def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, TB, PS;
+ [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, TB;
def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
- [(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB, PS;
+ [(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB;
}
//===----------------------------------------------------------------------===//
@@ -1218,11 +1218,11 @@ multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
let hasSideEffects = 0 in {
def rr#Suffix : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
- T8, PS, VEX, VVVV, Sched<[sched]>;
+ T8, VEX, VVVV, Sched<[sched]>;
let mayLoad = 1 in
def rm#Suffix : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
- T8, PS, VEX, VVVV, Sched<[sched.Folded]>;
+ T8, VEX, VVVV, Sched<[sched.Folded]>;
}
}
@@ -1288,12 +1288,12 @@ multiclass bmi4VOp3_base<bits<8> opc, string mnemonic, RegisterClass RC,
def rr#Suffix : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
- T8, PS, VEX, Sched<[Sched]>;
+ T8, VEX, Sched<[Sched]>;
let mayLoad = 1 in
def rm#Suffix : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
- (implicit EFLAGS)]>, T8, PS, VEX,
+ (implicit EFLAGS)]>, T8, VEX,
Sched<[Sched.Folded,
// x86memop:$src1
ReadDefault, ReadDefault, ReadDefault, ReadDefault,
@@ -1497,19 +1497,19 @@ let SchedRW = [WriteStore] in {
def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore32 addr:$dst, GR32:$src)]>,
- T8, PS, Requires<[HasMOVDIRI, NoEGPR]>;
+ T8, Requires<[HasMOVDIRI, NoEGPR]>;
def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore64 addr:$dst, GR64:$src)]>,
- T8, PS, Requires<[In64BitMode, HasMOVDIRI, NoEGPR]>;
+ T8, Requires<[In64BitMode, HasMOVDIRI, NoEGPR]>;
def MOVDIRI32_EVEX : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore32 addr:$dst, GR32:$src)]>,
- EVEX, NoCD8, T_MAP4, PS, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
+ EVEX, NoCD8, T_MAP4, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
def MOVDIRI64_EVEX : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movdiri\t{$src, $dst|$dst, $src}",
[(int_x86_directstore64 addr:$dst, GR64:$src)]>,
- EVEX, NoCD8, T_MAP4, PS, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
+ EVEX, NoCD8, T_MAP4, Requires<[In64BitMode, HasMOVDIRI, HasEGPR]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1588,11 +1588,11 @@ let SchedRW = [WriteSystem] in {
let Uses = [EAX, EDX] in
def INVLPGB32 : I<0x01, MRM_FE, (outs), (ins),
"invlpgb", []>,
- TB, PS, Requires<[Not64BitMode]>;
+ TB, Requires<[Not64BitMode]>;
let Uses = [RAX, EDX] in
def INVLPGB64 : I<0x01, MRM_FE, (outs), (ins),
"invlpgb", []>,
- TB, PS, Requires<[In64BitMode]>;
+ TB, Requires<[In64BitMode]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1602,7 +1602,7 @@ let SchedRW = [WriteSystem] in {
let SchedRW = [WriteSystem] in {
def TLBSYNC : I<0x01, MRM_FF, (outs), (ins),
"tlbsync", []>,
- TB, PS, Requires<[]>;
+ TB, Requires<[]>;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -1617,7 +1617,7 @@ let Uses = [EAX], SchedRW = [WriteSystem] in
//
let SchedRW = [WriteSystem] in
def SERIALIZE : I<0x01, MRM_E8, (outs), (ins), "serialize",
- [(int_x86_serialize)]>, TB, PS,
+ [(int_x86_serialize)]>, TB,
Requires<[HasSERIALIZE]>;
//===----------------------------------------------------------------------===//
@@ -1711,4 +1711,4 @@ def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
- [(int_x86_cldemote addr:$src)]>, TB, PS;
+ [(int_x86_cldemote addr:$src)]>, TB;
diff --git a/llvm/lib/Target/X86/X86InstrRAOINT.td b/llvm/lib/Target/X86/X86InstrRAOINT.td
index 601355d4f7de4e..bc17b00f3573a9 100644
--- a/llvm/lib/Target/X86/X86InstrRAOINT.td
+++ b/llvm/lib/Target/X86/X86InstrRAOINT.td
@@ -39,7 +39,7 @@ multiclass RAOINT_BASE<string OpcodeStr> {
Sched<[WriteALURMW]>, REX_W;
}
-defm AADD : RAOINT_BASE<"add">, T8, PS;
+defm AADD : RAOINT_BASE<"add">, T8;
defm AAND : RAOINT_BASE<"and">, T8, PD;
defm AOR : RAOINT_BASE<"or" >, T8, XD;
defm AXOR : RAOINT_BASE<"xor">, T8, XS;
diff --git a/llvm/lib/Target/X86/X86InstrSGX.td b/llvm/lib/Target/X86/X86InstrSGX.td
index 3c8d6e3c6b6b33..747f5aa86653d6 100644
--- a/llvm/lib/Target/X86/X86InstrSGX.td
+++ b/llvm/lib/Target/X86/X86InstrSGX.td
@@ -17,13 +17,13 @@
let SchedRW = [WriteSystem], Predicates = [HasSGX] in {
// ENCLS - Execute an Enclave System Function of Specified Leaf Number
def ENCLS : I<0x01, MRM_CF, (outs), (ins),
- "encls", []>, TB, PS;
+ "encls", []>, TB;
// ENCLU - Execute an Enclave User Function of Specified Leaf Number
def ENCLU : I<0x01, MRM_D7, (outs), (ins),
- "enclu", []>, TB, PS;
+ "enclu", []>, TB;
// ENCLV - Execute an Enclave VMM Function of Specified Leaf Number
def ENCLV : I<0x01, MRM_C0, (outs), (ins),
- "enclv", []>, TB, PS;
+ "enclv", []>, TB;
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 27d3974a674ab6..df1f0b5b4ca727 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -352,26 +352,26 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in
let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- TB, PS, VEX, WIG;
+ TB, VEX, WIG;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
TB, PD, VEX, WIG;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- TB, PS, VEX, WIG;
+ TB, VEX, WIG;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
TB, PD, VEX, WIG;
defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.YMM>,
- TB, PS, VEX, VEX_L, WIG;
+ TB, VEX, VEX_L, WIG;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64, "movapd",
SSEPackedDouble, SchedWriteFMoveLS.YMM>,
TB, PD, VEX, VEX_L, WIG;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.YMM>,
- TB, PS, VEX, VEX_L, WIG;
+ TB, VEX, VEX_L, WIG;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64, "movupd",
SSEPackedDouble, SchedWriteFMoveLS.YMM>,
TB, PD, VEX, VEX_L, WIG;
@@ -380,10 +380,10 @@ defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64, "movupd",
let Predicates = [UseSSE1] in {
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- TB, PS;
+ TB;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- TB, PS;
+ TB;
}
let Predicates = [UseSSE2] in {
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
@@ -666,7 +666,7 @@ multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDPatternOperator pdnode,
def PSrm : PI<opc, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
!strconcat(base_opc, "s", asm_opr),
- [], SSEPackedSingle>, TB, PS,
+ [], SSEPackedSingle>, TB,
Sched<[SchedWriteFShuffle.XMM.Folded, SchedWriteFShuffle.XMM.ReadAfterFold]>;
def PDrm : PI<opc, MRMSrcMem,
@@ -1233,16 +1233,16 @@ defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, load,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PS>,
- TB, PS, VEX, Requires<[HasAVX, NoVLX]>, WIG;
+ TB, VEX, Requires<[HasAVX, NoVLX]>, WIG;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, i256mem, v8f32, v8i32, load,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PSY>,
- TB, PS, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, WIG;
+ TB, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, WIG;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, memop,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PS>,
- TB, PS, Requires<[UseSSE2]>;
+ TB, Requires<[UseSSE2]>;
}
// AVX aliases
@@ -1699,30 +1699,30 @@ let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (X86any_vfpext (v4f32 VR128:$src))))]>,
- TB, PS, VEX, Sched<[WriteCvtPS2PD]>, WIG;
+ TB, VEX, Sched<[WriteCvtPS2PD]>, WIG;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
- TB, PS, VEX, Sched<[WriteCvtPS2PD.Folded]>, WIG;
+ TB, VEX, Sched<[WriteCvtPS2PD.Folded]>, WIG;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (v4f64 (any_fpextend (v4f32 VR128:$src))))]>,
- TB, PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, WIG;
+ TB, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, WIG;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (v4f64 (extloadv4f32 addr:$src)))]>,
- TB, PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, WIG;
+ TB, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, WIG;
}
let Predicates = [UseSSE2], Uses = [MXCSR], mayRaiseFPException = 1 in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (X86any_vfpext (v4f32 VR128:$src))))]>,
- TB, PS, Sched<[WriteCvtPS2PD]>;
+ TB, Sched<[WriteCvtPS2PD]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
- TB, PS, Sched<[WriteCvtPS2PD.Folded]>;
+ TB, Sched<[WriteCvtPS2PD.Folded]>;
}
// Convert Packed DW Integers to Packed Double FP
@@ -1919,42 +1919,42 @@ let mayLoad = 1 in
let Defs = [EFLAGS] in {
defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
+ "ucomiss", SSEPackedSingle>, TB, VEX, VEX_LIG, WIG;
defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86any_fcmp, f64, f64mem, loadf64,
"ucomisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
defm VCOMISS : sse12_ord_cmp<0x2F, FR32, X86strict_fcmps, f32, f32mem, loadf32,
- "comiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
+ "comiss", SSEPackedSingle>, TB, VEX, VEX_LIG, WIG;
defm VCOMISD : sse12_ord_cmp<0x2F, FR64, X86strict_fcmps, f64, f64mem, loadf64,
"comisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
let isCodeGenOnly = 1 in {
defm VUCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
- sse_load_f32, "ucomiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
+ sse_load_f32, "ucomiss", SSEPackedSingle>, TB, VEX, VEX_LIG, WIG;
defm VUCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
sse_load_f64, "ucomisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
defm VCOMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
- sse_load_f32, "comiss", SSEPackedSingle>, TB, PS, VEX, VEX_LIG, WIG;
+ sse_load_f32, "comiss", SSEPackedSingle>, TB, VEX, VEX_LIG, WIG;
defm VCOMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
sse_load_f64, "comisd", SSEPackedDouble>, TB, PD, VEX, VEX_LIG, WIG;
}
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, TB, PS;
+ "ucomiss", SSEPackedSingle>, TB;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86any_fcmp, f64, f64mem, loadf64,
"ucomisd", SSEPackedDouble>, TB, PD;
defm COMISS : sse12_ord_cmp<0x2F, FR32, X86strict_fcmps, f32, f32mem, loadf32,
- "comiss", SSEPackedSingle>, TB, PS;
+ "comiss", SSEPackedSingle>, TB;
defm COMISD : sse12_ord_cmp<0x2F, FR64, X86strict_fcmps, f64, f64mem, loadf64,
"comisd", SSEPackedDouble>, TB, PD;
let isCodeGenOnly = 1 in {
defm UCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
- sse_load_f32, "ucomiss", SSEPackedSingle>, TB, PS;
+ sse_load_f32, "ucomiss", SSEPackedSingle>, TB;
defm UCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
sse_load_f64, "ucomisd", SSEPackedDouble>, TB, PD;
defm COMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
- sse_load_f32, "comiss", SSEPackedSingle>, TB, PS;
+ sse_load_f32, "comiss", SSEPackedSingle>, TB;
defm COMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
sse_load_f64, "comisd", SSEPackedDouble>, TB, PD;
}
@@ -1979,20 +1979,20 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
defm VCMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, TB, PS, VEX, VVVV, WIG;
+ SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, TB, VEX, VVVV, WIG;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, v2f64,
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, loadv2f64>, TB, PD, VEX, VVVV, WIG;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, v8f32,
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, TB, PS, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, TB, VEX, VVVV, VEX_L, WIG;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, v4f64,
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SchedWriteFCmpSizes.PD.YMM, SSEPackedDouble, loadv4f64>, TB, PD, VEX, VVVV, VEX_L, WIG;
let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
- SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, memopv4f32>, TB, PS;
+ SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, memopv4f32>, TB;
defm CMPPD : sse12_cmp_packed<VR128, f128mem, v2f64,
"cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, memopv2f64>, TB, PD;
@@ -2076,11 +2076,11 @@ let Predicates = [HasAVX, NoVLX] in {
defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>,
- TB, PS, VEX, VVVV, WIG;
+ TB, VEX, VVVV, WIG;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv8f32, SchedWriteFShuffle.YMM, SSEPackedSingle>,
- TB, PS, VEX, VVVV, VEX_L, WIG;
+ TB, VEX, VVVV, VEX_L, WIG;
defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble>,
@@ -2093,7 +2093,7 @@ let Predicates = [HasAVX, NoVLX] in {
let Constraints = "$src1 = $dst" in {
defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- memopv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS;
+ memopv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>, TB;
defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
memopv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, TB, PD;
@@ -2126,26 +2126,26 @@ multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
let Predicates = [HasAVX, NoVLX] in {
defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, load,
VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS, VEX, VVVV, WIG;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, VEX, VVVV, WIG;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, load,
VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, TB, PD, VEX, VVVV, WIG;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, load,
VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS, VEX, VVVV, WIG;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, VEX, VVVV, WIG;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, load,
VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SchedWriteFShuffle.XMM, SSEPackedDouble>, TB, PD, VEX, VVVV, WIG;
defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, load,
VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedSingle>, TB, PS, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFShuffle.YMM, SSEPackedSingle>, TB, VEX, VVVV, VEX_L, WIG;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, load,
VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SchedWriteFShuffle.YMM, SSEPackedDouble>, TB, PD, VEX, VVVV, VEX_L, WIG;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, load,
VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedSingle>, TB, PS, VEX, VVVV, VEX_L, WIG;
+ SchedWriteFShuffle.YMM, SSEPackedSingle>, TB, VEX, VVVV, VEX_L, WIG;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, load,
VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SchedWriteFShuffle.YMM, SSEPackedDouble>, TB, PD, VEX, VVVV, VEX_L, WIG;
@@ -2154,13 +2154,13 @@ defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, load,
let Constraints = "$src1 = $dst" in {
defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memop,
VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB;
defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memop,
VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, TB, PD;
defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memop,
VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, TB, PS;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, TB;
defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memop,
VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
SchedWriteFShuffle.XMM, SSEPackedDouble>, TB, PD;
@@ -2208,11 +2208,11 @@ multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
let Predicates = [HasAVX] in {
defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
- SSEPackedSingle>, TB, PS, VEX, WIG;
+ SSEPackedSingle>, TB, VEX, WIG;
defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
SSEPackedDouble>, TB, PD, VEX, WIG;
defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
- SSEPackedSingle>, TB, PS, VEX, VEX_L, WIG;
+ SSEPackedSingle>, TB, VEX, VEX_L, WIG;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
SSEPackedDouble>, TB, PD, VEX, VEX_L, WIG;
@@ -2228,7 +2228,7 @@ let Predicates = [HasAVX] in {
}
defm MOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
- SSEPackedSingle>, TB, PS;
+ SSEPackedSingle>, TB;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
SSEPackedDouble>, TB, PD;
@@ -2312,7 +2312,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f256mem, sched.YMM,
- [], [], 0>, TB, PS, VEX, VVVV, VEX_L, WIG;
+ [], [], 0>, TB, VEX, VVVV, VEX_L, WIG;
defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f256mem, sched.YMM,
@@ -2320,7 +2320,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
- [], [], 0>, TB, PS, VEX, VVVV, WIG;
+ [], [], 0>, TB, VEX, VVVV, WIG;
defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
@@ -2330,7 +2330,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
- [], []>, TB, PS;
+ [], []>, TB;
defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
@@ -2636,14 +2636,14 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
VR128, v4f32, f128mem, loadv4f32,
- SSEPackedSingle, sched.PS.XMM, 0>, TB, PS, VEX, VVVV, WIG;
+ SSEPackedSingle, sched.PS.XMM, 0>, TB, VEX, VVVV, WIG;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
VR128, v2f64, f128mem, loadv2f64,
SSEPackedDouble, sched.PD.XMM, 0>, TB, PD, VEX, VVVV, WIG;
defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
OpNode, VR256, v8f32, f256mem, loadv8f32,
- SSEPackedSingle, sched.PS.YMM, 0>, TB, PS, VEX, VVVV, VEX_L, WIG;
+ SSEPackedSingle, sched.PS.YMM, 0>, TB, VEX, VVVV, VEX_L, WIG;
defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
OpNode, VR256, v4f64, f256mem, loadv4f64,
SSEPackedDouble, sched.PD.YMM, 0>, TB, PD, VEX, VVVV, VEX_L, WIG;
@@ -2652,7 +2652,7 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
v4f32, f128mem, memopv4f32, SSEPackedSingle,
- sched.PS.XMM>, TB, PS;
+ sched.PS.XMM>, TB;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
v2f64, f128mem, memopv2f64, SSEPackedDouble,
sched.PD.XMM>, TB, PD;
@@ -3165,11 +3165,11 @@ let SchedRW = [WriteStoreNT] in {
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movnti{l}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
- TB, PS, Requires<[HasSSE2]>;
+ TB, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movnti{q}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
- TB, PS, Requires<[HasSSE2]>;
+ TB, Requires<[HasSSE2]>;
} // SchedRW = [WriteStoreNT]
let Predicates = [HasAVX, NoVLX] in {
@@ -3226,7 +3226,7 @@ let SchedRW = [WriteLoad] in {
// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
"clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
- TB, PS, Requires<[HasCLFLUSH]>;
+ TB, Requires<[HasCLFLUSH]>;
}
let SchedRW = [WriteNop] in {
@@ -3241,11 +3241,11 @@ let SchedRW = [WriteFence] in {
// TODO: As with mfence, we may want to ease the availability of sfence/lfence
// to include any 64-bit target.
def SFENCE : I<0xAE, MRM7X, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
- TB, PS, Requires<[HasSSE1]>;
+ TB, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM5X, (outs), (ins), "lfence", [(int_x86_sse2_lfence)]>,
- TB, PS, Requires<[HasSSE2]>;
+ TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM6X, (outs), (ins), "mfence", [(int_x86_sse2_mfence)]>,
- TB, PS, Requires<[HasMFence]>;
+ TB, Requires<[HasMFence]>;
} // SchedRW
def : Pat<(X86MFence), (MFENCE)>;
@@ -3266,11 +3266,11 @@ def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
let mayLoad=1, hasSideEffects=1, Defs=[MXCSR] in
def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
"ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>,
- TB, PS, Sched<[WriteLDMXCSR]>;
+ TB, Sched<[WriteLDMXCSR]>;
let mayStore=1, hasSideEffects=1, Uses=[MXCSR] in
def STMXCSR : I<0xAE, MRM3m, (outs), (ins i32mem:$dst),
"stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>,
- TB, PS, Sched<[WriteSTMXCSR]>;
+ TB, Sched<[WriteSTMXCSR]>;
//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
@@ -6715,7 +6715,7 @@ multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
[!if(UsesXMM0,
(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
(set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>,
- T8, PS, Sched<[sched]>;
+ T8, Sched<[sched]>;
def rm#Suffix : I<Opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
@@ -6726,7 +6726,7 @@ multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
(set VR128:$dst, (IntId VR128:$src1,
(memop addr:$src2), XMM0)),
(set VR128:$dst, (IntId VR128:$src1,
- (memop addr:$src2))))]>, T8, PS,
+ (memop addr:$src2))))]>, T8,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
@@ -6736,7 +6736,7 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, NoEGPR] in {
"sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
- (i8 timm:$src3)))]>, TA, PS,
+ (i8 timm:$src3)))]>, TA,
Sched<[SchedWriteVecIMul.XMM]>;
def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, u8imm:$src3),
@@ -6744,7 +6744,7 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, NoEGPR] in {
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1,
(memop addr:$src2),
- (i8 timm:$src3)))]>, TA, PS,
+ (i8 timm:$src3)))]>, TA,
Sched<[SchedWriteVecIMul.XMM.Folded,
SchedWriteVecIMul.XMM.ReadAfterFold]>;
@@ -6772,7 +6772,7 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, HasEGPR, In64BitMode] in
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
(i8 timm:$src3)))]>,
- EVEX, NoCD8, T_MAP4, PS, Sched<[SchedWriteVecIMul.XMM]>;
+ EVEX, NoCD8, T_MAP4, Sched<[SchedWriteVecIMul.XMM]>;
def SHA1RNDS4rmi_EVEX: Ii8<0xD4, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, u8imm:$src3),
"sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
@@ -6780,7 +6780,7 @@ let Constraints = "$src1 = $dst", Predicates = [HasSHA, HasEGPR, In64BitMode] in
(int_x86_sha1rnds4 VR128:$src1,
(memop addr:$src2),
(i8 timm:$src3)))]>,
- EVEX, NoCD8, T_MAP4, PS,
+ EVEX, NoCD8, T_MAP4,
Sched<[SchedWriteVecIMul.XMM.Folded,
SchedWriteVecIMul.XMM.ReadAfterFold]>;
@@ -7474,12 +7474,12 @@ let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
- [(int_x86_avx_vzeroall)]>, TB, PS, VEX, VEX_L,
+ [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L,
Requires<[HasAVX]>, WIG;
// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
- [(int_x86_avx_vzeroupper)]>, TB, PS, VEX,
+ [(int_x86_avx_vzeroupper)]>, TB, VEX,
Requires<[HasAVX]>, WIG;
} // Defs
} // SchedRW
@@ -8240,10 +8240,10 @@ let Predicates = [HasAVXVNNIINT8] in {
1>, VEX_L, T8, XD;
defm VPDPBUUD : avx_dotprod_rm<0x50,"vpdpbuud", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbuud, SchedWriteVecIMul.XMM,
- 1>, T8, PS;
+ 1>, T8;
defm VPDPBUUDY : avx_dotprod_rm<0x50,"vpdpbuud", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbuud, SchedWriteVecIMul.YMM,
- 1>, VEX_L, T8, PS;
+ 1>, VEX_L, T8;
defm VPDPBSSDS : avx_dotprod_rm<0x51,"vpdpbssds", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbssds, SchedWriteVecIMul.XMM,
1>, T8, XD;
@@ -8252,10 +8252,10 @@ let Predicates = [HasAVXVNNIINT8] in {
1>, VEX_L, T8, XD;
defm VPDPBUUDS : avx_dotprod_rm<0x51,"vpdpbuuds", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbuuds, SchedWriteVecIMul.XMM,
- 1>, T8, PS;
+ 1>, T8;
defm VPDPBUUDSY : avx_dotprod_rm<0x51,"vpdpbuuds", v8i32, VR256, loadv8i32,
i256mem, X86vpdpbuuds, SchedWriteVecIMul.YMM,
- 1>, VEX_L, T8, PS;
+ 1>, VEX_L, T8;
defm VPDPBSUD : avx_dotprod_rm<0x50,"vpdpbsud", v4i32, VR128, loadv4i32,
i128mem, X86vpdpbsud, SchedWriteVecIMul.XMM,
0>, T8, XS;
@@ -8316,7 +8316,7 @@ let Predicates = [HasAVXNECONVERT] in {
defm VCVTNEOBF162PS : AVX_NE_CONVERT_BASE<0xb0, "vcvtneobf162ps", f128mem,
f256mem>, T8, XD;
defm VCVTNEOPH2PS : AVX_NE_CONVERT_BASE<0xb0, "vcvtneoph2ps", f128mem,
- f256mem>, T8, PS;
+ f256mem>, T8;
defm VCVTNEPS2BF16 : VCVTNEPS2BF16_BASE, VEX, T8, XS, ExplicitVEXPrefix;
def : Pat<(v8bf16 (X86vfpround (v8f32 VR256:$src))),
@@ -8389,7 +8389,7 @@ let Predicates = [HasSM3], Constraints = "$src1 = $dst" in {
}
}
-defm VSM3MSG1 : SM3_Base<"vsm3msg1">, T8, PS;
+defm VSM3MSG1 : SM3_Base<"vsm3msg1">, T8;
defm VSM3MSG2 : SM3_Base<"vsm3msg2">, T8, PD;
defm VSM3RNDS2 : VSM3RNDS2_Base, VEX, VVVV, TA, PD;
@@ -8458,5 +8458,5 @@ defm VPDPWSUD : avx_vnni_int16<0xd2, "vpdpwsud", 0>, T8, XS;
defm VPDPWSUDS : avx_vnni_int16<0xd3, "vpdpwsuds", 0>, T8, XS;
defm VPDPWUSD : avx_vnni_int16<0xd2, "vpdpwusd", 0>, T8, PD;
defm VPDPWUSDS : avx_vnni_int16<0xd3, "vpdpwusds", 0>, T8, PD;
-defm VPDPWUUD : avx_vnni_int16<0xd2, "vpdpwuud", 1>, T8, PS;
-defm VPDPWUUDS : avx_vnni_int16<0xd3, "vpdpwuuds", 1>, T8, PS;
+defm VPDPWUUD : avx_vnni_int16<0xd2, "vpdpwuud", 1>, T8;
+defm VPDPWUUDS : avx_vnni_int16<0xd3, "vpdpwuuds", 1>, T8;
diff --git a/llvm/lib/Target/X86/X86InstrSystem.td b/llvm/lib/Target/X86/X86InstrSystem.td
index 4471071e8f9a91..efb58c6102dd1d 100644
--- a/llvm/lib/Target/X86/X86InstrSystem.td
+++ b/llvm/lib/Target/X86/X86InstrSystem.td
@@ -426,11 +426,11 @@ let SchedRW = [WriteSystem] in {
let Uses = [EAX, ECX, EDX] in
def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", []>, TB;
let Uses = [EAX, ECX, EDX] in
-def WRMSRNS : I<0x01, MRM_C6, (outs), (ins), "wrmsrns", []>, TB, PS;
+def WRMSRNS : I<0x01, MRM_C6, (outs), (ins), "wrmsrns", []>, TB;
let Defs = [EAX, EDX], Uses = [ECX] in
def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", []>, TB;
let Defs = [RAX, EFLAGS], Uses = [RBX, RCX], Predicates = [In64BitMode] in
-def PBNDKB : I<0x01, MRM_C7, (outs), (ins), "pbndkb", []>, TB, PS;
+def PBNDKB : I<0x01, MRM_C7, (outs), (ins), "pbndkb", []>, TB;
let Uses = [RSI, RDI, RCX], Predicates = [In64BitMode] in {
def WRMSRLIST : I<0x01, MRM_C6, (outs), (ins), "wrmsrlist", []>, TB, XS;
def RDMSRLIST : I<0x01, MRM_C6, (outs), (ins), "rdmsrlist", []>, TB, XD;
@@ -523,10 +523,10 @@ let SchedRW = [WriteSystem] in {
let Predicates = [NoEGPR] in {
def WRSSD : I<0xF6, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrssd\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssd GR32:$src, addr:$dst)]>, T8, PS;
+ [(int_x86_wrssd GR32:$src, addr:$dst)]>, T8;
def WRSSQ : RI<0xF6, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrssq\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssq GR64:$src, addr:$dst)]>, T8, PS;
+ [(int_x86_wrssq GR64:$src, addr:$dst)]>, T8;
def WRUSSD : I<0xF5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrussd\t{$src, $dst|$dst, $src}",
[(int_x86_wrussd GR32:$src, addr:$dst)]>, T8, PD;
@@ -538,10 +538,10 @@ let Predicates = [NoEGPR] in {
let Predicates = [HasEGPR, In64BitMode] in {
def WRSSD_EVEX : I<0x66, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrssd\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssd GR32:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4, PS;
+ [(int_x86_wrssd GR32:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4;
def WRSSQ_EVEX : RI<0x66, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrssq\t{$src, $dst|$dst, $src}",
- [(int_x86_wrssq GR64:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4, PS;
+ [(int_x86_wrssq GR64:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4;
def WRUSSD_EVEX : I<0x65, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrussd\t{$src, $dst|$dst, $src}",
[(int_x86_wrussd GR32:$src, addr:$dst)]>, EVEX, NoCD8, T_MAP4, PD;
@@ -574,51 +574,51 @@ let SchedRW = [WriteSystem] in {
// on Windows without needing to enable the xsave feature to be compatible with
// MSVC.
let Defs = [EDX, EAX], Uses = [ECX] in
-def XGETBV : I<0x01, MRM_D0, (outs), (ins), "xgetbv", []>, TB, PS;
+def XGETBV : I<0x01, MRM_D0, (outs), (ins), "xgetbv", []>, TB;
let Uses = [EDX, EAX, ECX] in
def XSETBV : I<0x01, MRM_D1, (outs), (ins),
"xsetbv",
- [(int_x86_xsetbv ECX, EDX, EAX)]>, TB, PS;
+ [(int_x86_xsetbv ECX, EDX, EAX)]>, TB;
let Uses = [EDX, EAX] in {
def XSAVE : I<0xAE, MRM4m, (outs), (ins opaquemem:$dst),
"xsave\t$dst",
- [(int_x86_xsave addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE]>;
+ [(int_x86_xsave addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVE]>;
def XSAVE64 : RI<0xAE, MRM4m, (outs), (ins opaquemem:$dst),
"xsave64\t$dst",
- [(int_x86_xsave64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE, In64BitMode]>;
+ [(int_x86_xsave64 addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVE, In64BitMode]>;
def XRSTOR : I<0xAE, MRM5m, (outs), (ins opaquemem:$dst),
"xrstor\t$dst",
- [(int_x86_xrstor addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE]>;
+ [(int_x86_xrstor addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVE]>;
def XRSTOR64 : RI<0xAE, MRM5m, (outs), (ins opaquemem:$dst),
"xrstor64\t$dst",
- [(int_x86_xrstor64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE, In64BitMode]>;
+ [(int_x86_xrstor64 addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVE, In64BitMode]>;
def XSAVEOPT : I<0xAE, MRM6m, (outs), (ins opaquemem:$dst),
"xsaveopt\t$dst",
- [(int_x86_xsaveopt addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEOPT]>;
+ [(int_x86_xsaveopt addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVEOPT]>;
def XSAVEOPT64 : RI<0xAE, MRM6m, (outs), (ins opaquemem:$dst),
"xsaveopt64\t$dst",
- [(int_x86_xsaveopt64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEOPT, In64BitMode]>;
+ [(int_x86_xsaveopt64 addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVEOPT, In64BitMode]>;
def XSAVEC : I<0xC7, MRM4m, (outs), (ins opaquemem:$dst),
"xsavec\t$dst",
- [(int_x86_xsavec addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEC]>;
+ [(int_x86_xsavec addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVEC]>;
def XSAVEC64 : RI<0xC7, MRM4m, (outs), (ins opaquemem:$dst),
"xsavec64\t$dst",
- [(int_x86_xsavec64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVEC, In64BitMode]>;
+ [(int_x86_xsavec64 addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVEC, In64BitMode]>;
def XSAVES : I<0xC7, MRM5m, (outs), (ins opaquemem:$dst),
"xsaves\t$dst",
- [(int_x86_xsaves addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVES]>;
+ [(int_x86_xsaves addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVES]>;
def XSAVES64 : RI<0xC7, MRM5m, (outs), (ins opaquemem:$dst),
"xsaves64\t$dst",
- [(int_x86_xsaves64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVE, In64BitMode]>;
+ [(int_x86_xsaves64 addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVE, In64BitMode]>;
def XRSTORS : I<0xC7, MRM3m, (outs), (ins opaquemem:$dst),
"xrstors\t$dst",
- [(int_x86_xrstors addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVES]>;
+ [(int_x86_xrstors addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVES]>;
def XRSTORS64 : RI<0xC7, MRM3m, (outs), (ins opaquemem:$dst),
"xrstors64\t$dst",
- [(int_x86_xrstors64 addr:$dst, EDX, EAX)]>, TB, PS, Requires<[HasXSAVES, In64BitMode]>;
+ [(int_x86_xrstors64 addr:$dst, EDX, EAX)]>, TB, Requires<[HasXSAVES, In64BitMode]>;
} // Uses
} // SchedRW
@@ -651,10 +651,10 @@ let Defs = [RAX, RDX, RSI], Uses = [RAX, RSI] in
let SchedRW = [WriteSystem] in {
let Defs = [EAX, EDX], Uses = [ECX] in
def RDPKRUr : I<0x01, MRM_EE, (outs), (ins), "rdpkru",
- [(set EAX, (X86rdpkru ECX)), (implicit EDX)]>, TB, PS;
+ [(set EAX, (X86rdpkru ECX)), (implicit EDX)]>, TB;
let Uses = [EAX, ECX, EDX] in
def WRPKRUr : I<0x01, MRM_EF, (outs), (ins), "wrpkru",
- [(X86wrpkru EAX, EDX, ECX)]>, TB, PS;
+ [(X86wrpkru EAX, EDX, ECX)]>, TB;
} // SchedRW
//===----------------------------------------------------------------------===//
@@ -718,15 +718,15 @@ let Predicates = [In64BitMode, HasINVPCID] in {
//===----------------------------------------------------------------------===//
// SMAP Instruction
let Defs = [EFLAGS], SchedRW = [WriteSystem] in {
- def CLAC : I<0x01, MRM_CA, (outs), (ins), "clac", []>, TB, PS;
- def STAC : I<0x01, MRM_CB, (outs), (ins), "stac", []>, TB, PS;
+ def CLAC : I<0x01, MRM_CA, (outs), (ins), "clac", []>, TB;
+ def STAC : I<0x01, MRM_CB, (outs), (ins), "stac", []>, TB;
}
//===----------------------------------------------------------------------===//
// SMX Instruction
let SchedRW = [WriteSystem] in {
let Uses = [RAX, RBX, RCX, RDX], Defs = [RAX, RBX, RCX] in {
- def GETSEC : I<0x37, RawFrm, (outs), (ins), "getsec", []>, TB, PS;
+ def GETSEC : I<0x37, RawFrm, (outs), (ins), "getsec", []>, TB;
} // Uses, Defs
} // SchedRW
@@ -784,7 +784,7 @@ def PTWRITE64r : RI<0xAE, MRM4r, (outs), (ins GR64:$dst),
let SchedRW = [WriteSystem] in {
let Uses = [ECX], Defs = [EAX, EDX] in
- def RDPRU : I<0x01, MRM_FD, (outs), (ins), "rdpru", []>, TB, PS,
+ def RDPRU : I<0x01, MRM_FD, (outs), (ins), "rdpru", []>, TB,
Requires<[HasRDPRU]>;
}
@@ -803,6 +803,6 @@ let Uses = [ECX], Defs = [EAX, EDX] in
let SchedRW = [WriteSystem] in {
let Uses = [RAX, RBX, RCX, RDX], Defs = [RAX, RBX, RCX, RDX, EFLAGS] in
- def PCONFIG : I<0x01, MRM_C5, (outs), (ins), "pconfig", []>, TB, PS,
+ def PCONFIG : I<0x01, MRM_C5, (outs), (ins), "pconfig", []>, TB,
Requires<[HasPCONFIG]>;
} // SchedRW
diff --git a/llvm/lib/Target/X86/X86InstrTSX.td b/llvm/lib/Target/X86/X86InstrTSX.td
index cc9174a0c491c7..57604b682d54e9 100644
--- a/llvm/lib/Target/X86/X86InstrTSX.td
+++ b/llvm/lib/Target/X86/X86InstrTSX.td
@@ -37,11 +37,11 @@ def XABORT_DEF : I<0, Pseudo, (outs), (ins), "# XABORT DEF", []>;
}
def XEND : I<0x01, MRM_D5, (outs), (ins),
- "xend", [(int_x86_xend)]>, TB, PS, Requires<[HasRTM]>;
+ "xend", [(int_x86_xend)]>, TB, Requires<[HasRTM]>;
let Defs = [EFLAGS] in
def XTEST : I<0x01, MRM_D6, (outs), (ins),
- "xtest", [(set EFLAGS, (X86xtest))]>, TB, PS, Requires<[HasRTM]>;
+ "xtest", [(set EFLAGS, (X86xtest))]>, TB, Requires<[HasRTM]>;
def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm),
"xabort\t$imm",
diff --git a/llvm/lib/Target/X86/X86InstrUtils.td b/llvm/lib/Target/X86/X86InstrUtils.td
index 87eacf704de6cc..919e941abfd11f 100644
--- a/llvm/lib/Target/X86/X86InstrUtils.td
+++ b/llvm/lib/Target/X86/X86InstrUtils.td
@@ -31,9 +31,9 @@ class T_MAP4 { Map OpMap = T_MAP4; }
class T_MAP5 { Map OpMap = T_MAP5; }
class T_MAP6 { Map OpMap = T_MAP6; }
class T_MAP7 { Map OpMap = T_MAP7; }
-class XOP8 { Map OpMap = XOP8; Prefix OpPrefix = PS; }
-class XOP9 { Map OpMap = XOP9; Prefix OpPrefix = PS; }
-class XOPA { Map OpMap = XOPA; Prefix OpPrefix = PS; }
+class XOP8 { Map OpMap = XOP8; }
+class XOP9 { Map OpMap = XOP9; }
+class XOPA { Map OpMap = XOPA; }
class ThreeDNow { Map OpMap = ThreeDNow; }
class PS { Prefix OpPrefix = PS; }
class PD { Prefix OpPrefix = PD; }
@@ -79,7 +79,7 @@ class AVX512XDIi8Base : TB, XD {
Domain ExeDomain = SSEPackedInt;
ImmType ImmT = Imm8;
}
-class AVX512PSIi8Base : TB, PS {
+class AVX512PSIi8Base : TB {
Domain ExeDomain = SSEPackedSingle;
ImmType ImmT = Imm8;
}
@@ -574,11 +574,11 @@ class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
: Ii8<o, F, outs, ins, asm, pattern>, TB, XS, Requires<[UseSSE1]>;
class PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB, PS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
Requires<[UseSSE1]>;
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB, PS,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
Requires<[UseSSE1]>;
class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
@@ -587,7 +587,7 @@ class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedSingle>,
- TB, PS, Requires<[HasAVX]>;
+ TB, Requires<[HasAVX]>;
// SSE2 Instruction Templates:
//
@@ -694,11 +694,11 @@ class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[UseSSSE3]>;
class MMXSS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, PS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
Requires<[HasMMX, HasSSSE3]>;
class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, PS,
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
Requires<[HasMMX, HasSSSE3]>;
// SSE4.1 Instruction Templates:
@@ -824,7 +824,7 @@ class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX512]>;
class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB, PS,
+ : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
Requires<[HasAVX512]>;
class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, Domain d>
@@ -947,14 +947,14 @@ class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, PS, Requires<[HasMMX]>;
+ : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>;
class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, PS, REX_W,
+ : I<o, F, outs, ins, asm, pattern>, TB, REX_W,
Requires<[HasMMX,In64BitMode]>;
class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, TB, PS, Requires<[HasMMX]>;
+ : Ii8<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>;
/// ITy - This instruction base class takes the type info for the instruction.
/// Using this, it:
diff --git a/llvm/lib/Target/X86/X86InstrVMX.td b/llvm/lib/Target/X86/X86InstrVMX.td
index f2fc0dbaa3703a..7cc468fe15ad4e 100644
--- a/llvm/lib/Target/X86/X86InstrVMX.td
+++ b/llvm/lib/Target/X86/X86InstrVMX.td
@@ -43,7 +43,7 @@ def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
"vmclear\t$vmcs", []>, TB, PD;
// 0F 01 D4
-def VMFUNC : I<0x01, MRM_D4, (outs), (ins), "vmfunc", []>, TB, PS;
+def VMFUNC : I<0x01, MRM_D4, (outs), (ins), "vmfunc", []>, TB;
// 0F 01 C2
def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
@@ -51,31 +51,31 @@ def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
// 0F 01 C3
def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
- "vmptrld\t$vmcs", []>, TB, PS;
+ "vmptrld\t$vmcs", []>, TB;
def VMPTRSTm : I<0xC7, MRM7m, (outs), (ins i64mem:$vmcs),
- "vmptrst\t$vmcs", []>, TB, PS;
+ "vmptrst\t$vmcs", []>, TB;
def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
let mayStore = 1 in {
def VMREAD64mr : I<0x78, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
+ "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMREAD32mr : I<0x78, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
+ "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
} // mayStore
def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
let mayLoad = 1 in {
def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
- "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[In64BitMode]>;
+ "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
- "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, PS, Requires<[Not64BitMode]>;
+ "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
} // mayLoad
// 0F 01 C4
More information about the llvm-commits
mailing list