[llvm] 33e2e71 - [X86][Tablgen] Rename IgnoresVEX_W to IgnoresW, VEX_WIG to WIG, NFCI
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 19 20:15:53 PDT 2023
Author: Shengchen Kan
Date: 2023-04-20T11:15:09+08:00
New Revision: 33e2e713c6bf653278206fe268f2f05619057a29
URL: https://github.com/llvm/llvm-project/commit/33e2e713c6bf653278206fe268f2f05619057a29
DIFF: https://github.com/llvm/llvm-project/commit/33e2e713c6bf653278206fe268f2f05619057a29.diff
LOG: [X86][Tablgen] Rename IgnoresVEX_W to IgnoresW, VEX_WIG to WIG, NFCI
We no longer distinguish REX.W from VEX.W in .td.
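For readers skimming the diff, here is a minimal sketch of what the rename looks like in TableGen terms, condensed from the X86InstrFormats.td and X86InstrAMX.td hunks below (operands elided; not a standalone compilable .td file):

    // Before: the "ignores W" bit carried a VEX-specific name.
    class VEX_WIG { bit IgnoresVEX_W = 1; }
    def TCMMRLFP16PS : I<0x6c, MRMSrcReg4VOp3, ...>, VEX_4V, VEX_WIG, T8PS;

    // After: the same bit is spelled encoding-neutrally; codegen is unchanged (NFCI).
    class WIG { bit IgnoresW = 1; }
    def TCMMRLFP16PS : I<0x6c, MRMSrcReg4VOp3, ...>, VEX_4V, WIG, T8PS;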
Added:
Modified:
llvm/lib/Target/X86/X86InstrAMX.td
llvm/lib/Target/X86/X86InstrAVX512.td
llvm/lib/Target/X86/X86InstrFormats.td
llvm/lib/Target/X86/X86InstrSSE.td
llvm/utils/TableGen/X86DisassemblerTables.cpp
llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
llvm/utils/TableGen/X86FoldTablesEmitter.cpp
llvm/utils/TableGen/X86RecognizableInstr.cpp
llvm/utils/TableGen/X86RecognizableInstr.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86InstrAMX.td b/llvm/lib/Target/X86/X86InstrAMX.td
index 6948deb4bc14b..2dbb3e5ee3169 100644
--- a/llvm/lib/Target/X86/X86InstrAMX.td
+++ b/llvm/lib/Target/X86/X86InstrAMX.td
@@ -226,7 +226,7 @@ let Predicates = [HasAMXCOMPLEX, In64BitMode] in {
def TCMMRLFP16PS : I<0x6c, MRMSrcReg4VOp3, (outs TILE:$dst),
(ins TILE:$src1, TILE:$src2, TILE:$src3),
"tcmmrlfp16ps\t{$src3, $src2, $src1|$src1, $src2, $src3}",
- []>, VEX_4V, VEX_WIG, T8PS;
+ []>, VEX_4V, WIG, T8PS;
} // Constraints = "$src1 = $dst"
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 5205c1d2974b5..af8656cc7d7bf 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -1178,14 +1178,14 @@ def VEXTRACTPSZrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
- EVEX, VEX_WIG, Sched<[WriteVecExtract]>;
+ EVEX, WIG, Sched<[WriteVecExtract]>;
def VEXTRACTPSZmr : AVX512AIi8<0x17, MRMDestMem, (outs),
(ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
addr:$dst)]>,
- EVEX, VEX_WIG, EVEX_CD8<32, CD8VT1>, Sched<[WriteVecExtractSt]>;
+ EVEX, WIG, EVEX_CD8<32, CD8VT1>, Sched<[WriteVecExtractSt]>;
//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
@@ -2273,11 +2273,11 @@ let AddedComplexity = 2 in {
// FIXME: Is there a better scheduler class for VPCMP?
defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb",
SchedWriteVecALU, avx512vl_i8_info, HasBWI, 1>,
- EVEX_CD8<8, CD8VF>, VEX_WIG;
+ EVEX_CD8<8, CD8VF>, WIG;
defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw",
SchedWriteVecALU, avx512vl_i16_info, HasBWI, 1>,
- EVEX_CD8<16, CD8VF>, VEX_WIG;
+ EVEX_CD8<16, CD8VF>, WIG;
defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd",
SchedWriteVecALU, avx512vl_i32_info, HasAVX512, 1>,
@@ -2289,11 +2289,11 @@ defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq",
defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb",
SchedWriteVecALU, avx512vl_i8_info, HasBWI>,
- EVEX_CD8<8, CD8VF>, VEX_WIG;
+ EVEX_CD8<8, CD8VF>, WIG;
defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw",
SchedWriteVecALU, avx512vl_i16_info, HasBWI>,
- EVEX_CD8<16, CD8VF>, VEX_WIG;
+ EVEX_CD8<16, CD8VF>, WIG;
defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd",
SchedWriteVecALU, avx512vl_i32_info, HasAVX512>,
@@ -4988,7 +4988,7 @@ multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
bit IsCommutable = 0> {
defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info,
sched, prd, IsCommutable>, EVEX_CD8<16, CD8VF>,
- VEX_WIG;
+ WIG;
}
multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -4996,7 +4996,7 @@ multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
bit IsCommutable = 0> {
defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info,
sched, prd, IsCommutable>, EVEX_CD8<8, CD8VF>,
- VEX_WIG;
+ WIG;
}
multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
@@ -5179,14 +5179,14 @@ multiclass avx512_packs_all_i16_i8<bits<8> opc, string OpcodeStr,
SDNode OpNode> {
let Predicates = [HasBWI] in
defm NAME#Z : avx512_packs_rm<opc, OpcodeStr, OpNode, v32i16_info, v64i8_info,
- SchedWriteShuffle.ZMM>, EVEX_V512, VEX_WIG;
+ SchedWriteShuffle.ZMM>, EVEX_V512, WIG;
let Predicates = [HasBWI, HasVLX] in {
defm NAME#Z256 : avx512_packs_rm<opc, OpcodeStr, OpNode, v16i16x_info,
v32i8x_info, SchedWriteShuffle.YMM>,
- EVEX_V256, VEX_WIG;
+ EVEX_V256, WIG;
defm NAME#Z128 : avx512_packs_rm<opc, OpcodeStr, OpNode, v8i16x_info,
v16i8x_info, SchedWriteShuffle.XMM>,
- EVEX_V128, VEX_WIG;
+ EVEX_V128, WIG;
}
}
@@ -5213,9 +5213,9 @@ defm VPACKSSWB : avx512_packs_all_i16_i8 <0x63, "vpacksswb", X86Packss>, AVX512B
defm VPACKUSWB : avx512_packs_all_i16_i8 <0x67, "vpackuswb", X86Packus>, AVX512BIBase;
defm VPMADDUBSW : avx512_vpmadd<0x04, "vpmaddubsw", X86vpmaddubsw,
- avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD, VEX_WIG;
+ avx512vl_i8_info, avx512vl_i16_info>, AVX512BIBase, T8PD, WIG;
defm VPMADDWD : avx512_vpmadd<0xF5, "vpmaddwd", X86vpmaddwd,
- avx512vl_i16_info, avx512vl_i32_info, 1>, AVX512BIBase, VEX_WIG;
+ avx512vl_i16_info, avx512vl_i32_info, 1>, AVX512BIBase, WIG;
defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxsb", smax,
SchedWriteVecALU, HasBWI, 1>, T8PD;
@@ -6257,12 +6257,12 @@ multiclass avx512_shift_rmi_w<bits<8> opcw, Format ImmFormR, Format ImmFormM,
X86SchedWriteWidths sched> {
let Predicates = [HasBWI] in
defm WZ: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
- sched.ZMM, v32i16_info>, EVEX_V512, VEX_WIG;
+ sched.ZMM, v32i16_info>, EVEX_V512, WIG;
let Predicates = [HasVLX, HasBWI] in {
defm WZ256: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
- sched.YMM, v16i16x_info>, EVEX_V256, VEX_WIG;
+ sched.YMM, v16i16x_info>, EVEX_V256, WIG;
defm WZ128: avx512_shift_rmi<opcw, ImmFormR, ImmFormM, OpcodeStr, OpNode,
- sched.XMM, v8i16x_info>, EVEX_V128, VEX_WIG;
+ sched.XMM, v8i16x_info>, EVEX_V128, WIG;
}
}
@@ -6705,7 +6705,7 @@ multiclass avx512_pshufb_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
}
defm VPSHUFB: avx512_pshufb_sizes<0x00, "vpshufb", X86pshufb,
- SchedWriteVarShuffle>, VEX_WIG;
+ SchedWriteVarShuffle>, WIG;
//===----------------------------------------------------------------------===//
// Move Low to High and High to Low packed FP Instructions
@@ -10182,16 +10182,16 @@ multiclass avx512_pmovx_bw<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasBWI] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v8i16x_info,
v16i8x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VH>, T8PD, EVEX_V128, VEX_WIG;
+ EVEX_CD8<8, CD8VH>, T8PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v16i16x_info,
v16i8x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<8, CD8VH>, T8PD, EVEX_V256, VEX_WIG;
+ EVEX_CD8<8, CD8VH>, T8PD, EVEX_V256, WIG;
}
let Predicates = [HasBWI] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v32i16_info,
v32i8x_info, i256mem, LdFrag, OpNode>,
- EVEX_CD8<8, CD8VH>, T8PD, EVEX_V512, VEX_WIG;
+ EVEX_CD8<8, CD8VH>, T8PD, EVEX_V512, WIG;
}
}
@@ -10202,16 +10202,16 @@ multiclass avx512_pmovx_bd<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v4i32x_info,
v16i8x_info, i32mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V128, VEX_WIG;
+ EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v8i32x_info,
v16i8x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V256, VEX_WIG;
+ EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v16i32_info,
v16i8x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V512, VEX_WIG;
+ EVEX_CD8<8, CD8VQ>, T8PD, EVEX_V512, WIG;
}
}
@@ -10222,16 +10222,16 @@ multiclass avx512_pmovx_bq<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v2i64x_info,
v16i8x_info, i16mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VO>, T8PD, EVEX_V128, VEX_WIG;
+ EVEX_CD8<8, CD8VO>, T8PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v4i64x_info,
v16i8x_info, i32mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VO>, T8PD, EVEX_V256, VEX_WIG;
+ EVEX_CD8<8, CD8VO>, T8PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v8i64_info,
v16i8x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<8, CD8VO>, T8PD, EVEX_V512, VEX_WIG;
+ EVEX_CD8<8, CD8VO>, T8PD, EVEX_V512, WIG;
}
}
@@ -10242,16 +10242,16 @@ multiclass avx512_pmovx_wd<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v4i32x_info,
v8i16x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<16, CD8VH>, T8PD, EVEX_V128, VEX_WIG;
+ EVEX_CD8<16, CD8VH>, T8PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v8i32x_info,
v8i16x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<16, CD8VH>, T8PD, EVEX_V256, VEX_WIG;
+ EVEX_CD8<16, CD8VH>, T8PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v16i32_info,
v16i16x_info, i256mem, LdFrag, OpNode>,
- EVEX_CD8<16, CD8VH>, T8PD, EVEX_V512, VEX_WIG;
+ EVEX_CD8<16, CD8VH>, T8PD, EVEX_V512, WIG;
}
}
@@ -10262,16 +10262,16 @@ multiclass avx512_pmovx_wq<bits<8> opc, string OpcodeStr,
let Predicates = [HasVLX, HasAVX512] in {
defm Z128: avx512_pmovx_common<opc, OpcodeStr, sched.XMM, v2i64x_info,
v8i16x_info, i32mem, LdFrag, InVecNode>,
- EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V128, VEX_WIG;
+ EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V128, WIG;
defm Z256: avx512_pmovx_common<opc, OpcodeStr, sched.YMM, v4i64x_info,
v8i16x_info, i64mem, LdFrag, InVecNode>,
- EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V256, VEX_WIG;
+ EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V256, WIG;
}
let Predicates = [HasAVX512] in {
defm Z : avx512_pmovx_common<opc, OpcodeStr, sched.ZMM, v8i64_info,
v8i16x_info, i128mem, LdFrag, OpNode>,
- EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V512, VEX_WIG;
+ EVEX_CD8<16, CD8VQ>, T8PD, EVEX_V512, WIG;
}
}
@@ -11434,9 +11434,9 @@ multiclass avx512_unary_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
SDNode OpNode, X86SchedWriteWidths sched,
Predicate prd> {
defm W : avx512_unary_rm_vl<opc_w, OpcodeStr#"w", OpNode, sched,
- avx512vl_i16_info, prd>, VEX_WIG;
+ avx512vl_i16_info, prd>, WIG;
defm B : avx512_unary_rm_vl<opc_b, OpcodeStr#"b", OpNode, sched,
- avx512vl_i8_info, prd>, VEX_WIG;
+ avx512vl_i8_info, prd>, WIG;
}
multiclass avx512_unary_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
@@ -11673,8 +11673,8 @@ multiclass avx512_extract_elt_dq<string OpcodeStr, X86VectorVTInfo _,
}
}
-defm VPEXTRBZ : avx512_extract_elt_b<"vpextrb", v16i8x_info>, VEX_WIG;
-defm VPEXTRWZ : avx512_extract_elt_w<"vpextrw", v8i16x_info>, VEX_WIG;
+defm VPEXTRBZ : avx512_extract_elt_b<"vpextrb", v16i8x_info>, WIG;
+defm VPEXTRWZ : avx512_extract_elt_w<"vpextrw", v8i16x_info>, WIG;
defm VPEXTRDZ : avx512_extract_elt_dq<"vpextrd", v4i32x_info, GR32>;
defm VPEXTRQZ : avx512_extract_elt_dq<"vpextrq", v2i64x_info, GR64>, REX_W;
@@ -11719,9 +11719,9 @@ multiclass avx512_insert_elt_dq<bits<8> opc, string OpcodeStr,
}
defm VPINSRBZ : avx512_insert_elt_bw<0x20, "vpinsrb", X86pinsrb, v16i8x_info,
- extloadi8>, TAPD, VEX_WIG;
+ extloadi8>, TAPD, WIG;
defm VPINSRWZ : avx512_insert_elt_bw<0xC4, "vpinsrw", X86pinsrw, v8i16x_info,
- extloadi16>, PD, VEX_WIG;
+ extloadi16>, PD, WIG;
defm VPINSRDZ : avx512_insert_elt_dq<0x22, "vpinsrd", v4i32x_info, GR32>;
defm VPINSRQZ : avx512_insert_elt_dq<0x22, "vpinsrq", v2i64x_info, GR64>, REX_W;
@@ -11802,10 +11802,10 @@ multiclass avx512_shift_packed_all<bits<8> opc, SDNode OpNode, Format MRMr,
}
defm VPSLLDQ : avx512_shift_packed_all<0x73, X86vshldq, MRM7r, MRM7m, "vpslldq",
SchedWriteShuffle, HasBWI>,
- AVX512PDIi8Base, EVEX_4V, VEX_WIG;
+ AVX512PDIi8Base, EVEX_4V, WIG;
defm VPSRLDQ : avx512_shift_packed_all<0x73, X86vshrdq, MRM3r, MRM3m, "vpsrldq",
SchedWriteShuffle, HasBWI>,
- AVX512PDIi8Base, EVEX_4V, VEX_WIG;
+ AVX512PDIi8Base, EVEX_4V, WIG;
multiclass avx512_psadbw_packed<bits<8> opc, SDNode OpNode,
string OpcodeStr, X86FoldableSchedWrite sched,
@@ -11843,7 +11843,7 @@ multiclass avx512_psadbw_packed_all<bits<8> opc, SDNode OpNode,
}
defm VPSADBW : avx512_psadbw_packed_all<0xf6, X86psadbw, "vpsadbw",
- SchedWritePSADBW, HasBWI>, EVEX_4V, VEX_WIG;
+ SchedWritePSADBW, HasBWI>, EVEX_4V, WIG;
// Transforms to swizzle an immediate to enable better matching when
// memory operand isn't in the right place.
@@ -12424,17 +12424,17 @@ multiclass avx512_vaes<bits<8> Op, string OpStr, string IntPrefix> {
defm Z128 : AESI_binop_rm_int<Op, OpStr,
!cast<Intrinsic>(IntPrefix),
loadv2i64, 0, VR128X, i128mem>,
- EVEX_4V, EVEX_CD8<64, CD8VF>, EVEX_V128, VEX_WIG;
+ EVEX_4V, EVEX_CD8<64, CD8VF>, EVEX_V128, WIG;
defm Z256 : AESI_binop_rm_int<Op, OpStr,
!cast<Intrinsic>(IntPrefix#"_256"),
loadv4i64, 0, VR256X, i256mem>,
- EVEX_4V, EVEX_CD8<64, CD8VF>, EVEX_V256, VEX_WIG;
+ EVEX_4V, EVEX_CD8<64, CD8VF>, EVEX_V256, WIG;
}
let Predicates = [HasAVX512, HasVAES] in
defm Z : AESI_binop_rm_int<Op, OpStr,
!cast<Intrinsic>(IntPrefix#"_512"),
loadv8i64, 0, VR512, i512mem>,
- EVEX_4V, EVEX_CD8<64, CD8VF>, EVEX_V512, VEX_WIG;
+ EVEX_4V, EVEX_CD8<64, CD8VF>, EVEX_V512, WIG;
}
defm VAESENC : avx512_vaes<0xDC, "vaesenc", "int_x86_aesni_aesenc">;
@@ -12448,15 +12448,15 @@ defm VAESDECLAST : avx512_vaes<0xDF, "vaesdeclast", "int_x86_aesni_aesdeclast">
let Predicates = [HasAVX512, HasVPCLMULQDQ] in
defm VPCLMULQDQZ : vpclmulqdq<VR512, i512mem, loadv8i64, int_x86_pclmulqdq_512>,
- EVEX_4V, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_WIG;
+ EVEX_4V, EVEX_V512, EVEX_CD8<64, CD8VF>, WIG;
let Predicates = [HasVLX, HasVPCLMULQDQ] in {
defm VPCLMULQDQZ128 : vpclmulqdq<VR128X, i128mem, loadv2i64, int_x86_pclmulqdq>,
- EVEX_4V, EVEX_V128, EVEX_CD8<64, CD8VF>, VEX_WIG;
+ EVEX_4V, EVEX_V128, EVEX_CD8<64, CD8VF>, WIG;
defm VPCLMULQDQZ256: vpclmulqdq<VR256X, i256mem, loadv4i64,
int_x86_pclmulqdq_256>, EVEX_4V, EVEX_V256,
- EVEX_CD8<64, CD8VF>, VEX_WIG;
+ EVEX_CD8<64, CD8VF>, WIG;
}
// Aliases
diff --git a/llvm/lib/Target/X86/X86InstrFormats.td b/llvm/lib/Target/X86/X86InstrFormats.td
index 331ca0a07fa4d..f45869e15267c 100644
--- a/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/llvm/lib/Target/X86/X86InstrFormats.td
@@ -231,7 +231,7 @@ class TAPD : TA { Prefix OpPrefix = PD; }
class TAXD : TA { Prefix OpPrefix = XD; }
class TAXS : TA { Prefix OpPrefix = XS; }
class VEX { Encoding OpEnc = EncVEX; }
-class VEX_WIG { bit IgnoresVEX_W = 1; }
+class WIG { bit IgnoresW = 1; }
// Special version of REX_W that can be changed to VEX.W==0 for EVEX2VEX.
class VEX_W1X { bit hasREX_W = 1; bit EVEX_W1_VEX_W0 = 1; }
class VEX_4V : VEX { bit hasVEX_4V = 1; }
@@ -314,7 +314,7 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bit hasREPPrefix = 0; // Does this inst have a REP prefix?
Encoding OpEnc = EncNormal; // Encoding used by this instruction
bits<2> OpEncBits = OpEnc.Value;
- bit IgnoresVEX_W = 0; // Does this inst ignore REX_W field?
+ bit IgnoresW = 0; // Does this inst ignore REX_W field?
bit EVEX_W1_VEX_W0 = 0; // This EVEX inst with VEX.W==1 can become a VEX
// instruction with VEX.W == 0.
bit hasVEX_4V = 0; // Does this inst require the VEX.VVVV field?
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 92b4d5d253c01..4296528bfc9bf 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -215,12 +215,12 @@ multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
let Predicates = [UseAVX, OptForSize] in
defm V#NAME : sse12_move_rr<OpNode, vt, OpcodeStr,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}", d>,
- VEX_4V, VEX_LIG, VEX_WIG;
+ VEX_4V, VEX_LIG, WIG;
def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(store RC:$src, addr:$dst)], d>,
- VEX, VEX_LIG, Sched<[WriteFStore]>, VEX_WIG;
+ VEX, VEX_LIG, Sched<[WriteFStore]>, WIG;
// SSE1 & 2
let Constraints = "$src1 = $dst" in {
let Predicates = [pred, NoSSE41_Or_OptForSize] in
@@ -248,7 +248,7 @@ multiclass sse12_move_rm<RegisterClass RC, ValueType vt, X86MemOperand x86memop,
def V#NAME#rm : SI<0x10, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (vt (vzloadfrag addr:$src)))], d>,
- VEX, VEX_LIG, Sched<[WriteFLoad]>, VEX_WIG;
+ VEX, VEX_LIG, Sched<[WriteFLoad]>, WIG;
def NAME#rm : SI<0x10, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (vt (vzloadfrag addr:$src)))], d>,
@@ -259,7 +259,7 @@ multiclass sse12_move_rm<RegisterClass RC, ValueType vt, X86MemOperand x86memop,
def V#NAME#rm_alt : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))], d>,
- VEX, VEX_LIG, Sched<[WriteFLoad]>, VEX_WIG;
+ VEX, VEX_LIG, Sched<[WriteFLoad]>, WIG;
def NAME#rm_alt : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))], d>,
@@ -352,29 +352,29 @@ let canFoldAsLoad = 1, isReMaterializable = 1 in
let Predicates = [HasAVX, NoVLX] in {
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- PS, VEX, VEX_WIG;
+ PS, VEX, WIG;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64, "movapd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
- PD, VEX, VEX_WIG;
+ PD, VEX, WIG;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.XMM>,
- PS, VEX, VEX_WIG;
+ PS, VEX, WIG;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, "movupd",
SSEPackedDouble, SchedWriteFMoveLS.XMM>,
- PD, VEX, VEX_WIG;
+ PD, VEX, WIG;
defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32, "movaps",
SSEPackedSingle, SchedWriteFMoveLS.YMM>,
- PS, VEX, VEX_L, VEX_WIG;
+ PS, VEX, VEX_L, WIG;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64, "movapd",
SSEPackedDouble, SchedWriteFMoveLS.YMM>,
- PD, VEX, VEX_L, VEX_WIG;
+ PD, VEX, VEX_L, WIG;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32, "movups",
SSEPackedSingle, SchedWriteFMoveLS.YMM>,
- PS, VEX, VEX_L, VEX_WIG;
+ PS, VEX, VEX_L, WIG;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64, "movupd",
SSEPackedDouble, SchedWriteFMoveLS.YMM>,
- PD, VEX, VEX_L, VEX_WIG;
+ PD, VEX, VEX_L, WIG;
}
let Predicates = [UseSSE1] in {
@@ -399,38 +399,38 @@ let SchedRW = [SchedWriteFMoveLS.XMM.MR] in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(alignedstore (v4f32 VR128:$src), addr:$dst)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movapd\t{$src, $dst|$dst, $src}",
[(alignedstore (v2f64 VR128:$src), addr:$dst)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
[(store (v4f32 VR128:$src), addr:$dst)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movupd\t{$src, $dst|$dst, $src}",
[(store (v2f64 VR128:$src), addr:$dst)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
} // SchedRW
let SchedRW = [SchedWriteFMoveLS.YMM.MR] in {
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(alignedstore (v8f32 VR256:$src), addr:$dst)]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movapd\t{$src, $dst|$dst, $src}",
[(alignedstore (v4f64 VR256:$src), addr:$dst)]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movups\t{$src, $dst|$dst, $src}",
[(store (v8f32 VR256:$src), addr:$dst)]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movupd\t{$src, $dst|$dst, $src}",
[(store (v4f64 VR256:$src), addr:$dst)]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
} // SchedRW
} // Predicate
@@ -441,38 +441,38 @@ let SchedRW = [SchedWriteFMoveLS.XMM.RR] in {
def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movaps\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movapd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movups\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
"movupd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_WIG;
+ VEX, WIG;
} // SchedRW
let SchedRW = [SchedWriteFMoveLS.YMM.RR] in {
def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movaps\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movapd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movups\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
"movupd\t{$src, $dst|$dst, $src}", []>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
} // SchedRW
} // Predicate
@@ -683,7 +683,7 @@ multiclass sse12_mov_hilo_packed<bits<8>opc, SDPatternOperator pdnode,
let Predicates = [UseAVX] in
defm V#NAME : sse12_mov_hilo_packed_base<opc, pdnode, base_opc,
"\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
let Constraints = "$src1 = $dst" in
defm NAME : sse12_mov_hilo_packed_base<opc, pdnode, base_opc,
@@ -698,12 +698,12 @@ let mayStore = 1, hasSideEffects = 0 in
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlps\t{$src, $dst|$dst, $src}",
[]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlpd\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt (v2f64 VR128:$src),
(iPTR 0))), addr:$dst)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
}// UseAVX
let mayStore = 1, hasSideEffects = 0 in
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
@@ -744,12 +744,12 @@ let Predicates = [UseAVX] in {
let mayStore = 1, hasSideEffects = 0 in
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhps\t{$src, $dst|$dst, $src}",
- []>, VEX, VEX_WIG;
+ []>, VEX, WIG;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhpd\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt
(v2f64 (X86Unpckh VR128:$src, VR128:$src)),
- (iPTR 0))), addr:$dst)]>, VEX, VEX_WIG;
+ (iPTR 0))), addr:$dst)]>, VEX, WIG;
} // UseAVX
let mayStore = 1, hasSideEffects = 0 in
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
@@ -823,14 +823,14 @@ let Predicates = [UseAVX] in {
"movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))]>,
- VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG;
+ VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, WIG;
let isCommutable = 1 in
def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
- VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, VEX_WIG;
+ VEX_4V, Sched<[SchedWriteFShuffle.XMM]>, WIG;
}
let Constraints = "$src1 = $dst" in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
@@ -1233,11 +1233,11 @@ defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, i64, v4f32, X86cvts2si,
defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, load,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PS>,
- PS, VEX, Requires<[HasAVX, NoVLX]>, VEX_WIG;
+ PS, VEX, Requires<[HasAVX, NoVLX]>, WIG;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, i256mem, v8f32, v8i32, load,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, WriteCvtI2PSY>,
- PS, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, VEX_WIG;
+ PS, VEX, VEX_L, Requires<[HasAVX, NoVLX]>, WIG;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, i128mem, v4f32, v4i32, memop,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
@@ -1289,13 +1289,13 @@ let isCodeGenOnly = 1, hasSideEffects = 0, Predicates = [UseAVX],
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
(ins FR32:$src1, FR64:$src2),
"cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- VEX_4V, VEX_LIG, VEX_WIG,
+ VEX_4V, VEX_LIG, WIG,
Sched<[WriteCvtSD2SS]>, SIMD_EXC;
let mayLoad = 1 in
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f64mem:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- XD, VEX_4V, VEX_LIG, VEX_WIG,
+ XD, VEX_4V, VEX_LIG, WIG,
Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>, SIMD_EXC;
}
@@ -1321,14 +1321,14 @@ def VCVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(v4f32 (X86frounds VR128:$src1, (v2f64 VR128:$src2))))]>,
- XD, VEX_4V, VEX_LIG, VEX_WIG, Requires<[UseAVX]>,
+ XD, VEX_4V, VEX_LIG, WIG, Requires<[UseAVX]>,
Sched<[WriteCvtSD2SS]>;
def VCVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(v4f32 (X86frounds VR128:$src1, (sse_load_f64 addr:$src2))))]>,
- XD, VEX_4V, VEX_LIG, VEX_WIG, Requires<[UseAVX]>,
+ XD, VEX_4V, VEX_LIG, WIG, Requires<[UseAVX]>,
Sched<[WriteCvtSD2SS.Folded, WriteCvtSD2SS.ReadAfterFold]>;
let Constraints = "$src1 = $dst" in {
def CVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
@@ -1353,13 +1353,13 @@ let isCodeGenOnly = 1, hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
(ins FR64:$src1, FR32:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- XS, VEX_4V, VEX_LIG, VEX_WIG,
+ XS, VEX_4V, VEX_LIG, WIG,
Sched<[WriteCvtSS2SD]>, Requires<[UseAVX]>, SIMD_EXC;
let mayLoad = 1 in
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
(ins FR64:$src1, f32mem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
- XS, VEX_4V, VEX_LIG, VEX_WIG,
+ XS, VEX_4V, VEX_LIG, WIG,
Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>,
Requires<[UseAVX, OptForSize]>, SIMD_EXC;
} // isCodeGenOnly = 1, hasSideEffects = 0
@@ -1386,13 +1386,13 @@ let hasSideEffects = 0, Uses = [MXCSR], mayRaiseFPException = 1,
def VCVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XS, VEX_4V, VEX_LIG, VEX_WIG,
+ []>, XS, VEX_4V, VEX_LIG, WIG,
Requires<[HasAVX]>, Sched<[WriteCvtSS2SD]>;
let mayLoad = 1 in
def VCVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- []>, XS, VEX_4V, VEX_LIG, VEX_WIG, Requires<[HasAVX]>,
+ []>, XS, VEX_4V, VEX_LIG, WIG, Requires<[HasAVX]>,
Sched<[WriteCvtSS2SD.Folded, WriteCvtSS2SD.ReadAfterFold]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def CVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
@@ -1527,22 +1527,22 @@ let Predicates = [HasAVX, NoVLX] in {
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))]>,
- VEX, Sched<[WriteCvtPS2I]>, VEX_WIG, SIMD_EXC;
+ VEX, Sched<[WriteCvtPS2I]>, WIG, SIMD_EXC;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86cvtp2Int (loadv4f32 addr:$src))))]>,
- VEX, Sched<[WriteCvtPS2ILd]>, VEX_WIG, SIMD_EXC;
+ VEX, Sched<[WriteCvtPS2ILd]>, WIG, SIMD_EXC;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(v8i32 (X86cvtp2Int (v8f32 VR256:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPS2IY]>, VEX_WIG, SIMD_EXC;
+ VEX, VEX_L, Sched<[WriteCvtPS2IY]>, WIG, SIMD_EXC;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(v8i32 (X86cvtp2Int (loadv8f32 addr:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPS2IYLd]>, VEX_WIG, SIMD_EXC;
+ VEX, VEX_L, Sched<[WriteCvtPS2IYLd]>, WIG, SIMD_EXC;
}
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
@@ -1564,26 +1564,26 @@ def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86cvtp2Int (v2f64 VR128:$src))))]>,
- VEX, Sched<[WriteCvtPD2I]>, VEX_WIG;
+ VEX, Sched<[WriteCvtPD2I]>, WIG;
// XMM only
def VCVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"vcvtpd2dq{x}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86cvtp2Int (loadv2f64 addr:$src))))]>, VEX,
- Sched<[WriteCvtPD2ILd]>, VEX_WIG;
+ Sched<[WriteCvtPD2ILd]>, WIG;
// YMM only
def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"vcvtpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86cvtp2Int (v4f64 VR256:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPD2IY]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtPD2IY]>, WIG;
def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86cvtp2Int (loadv4f64 addr:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, WIG;
}
def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
@@ -1610,23 +1610,23 @@ def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86any_cvttp2si (v4f32 VR128:$src))))]>,
- VEX, Sched<[WriteCvtPS2I]>, VEX_WIG;
+ VEX, Sched<[WriteCvtPS2I]>, WIG;
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86any_cvttp2si (loadv4f32 addr:$src))))]>,
- VEX, Sched<[WriteCvtPS2ILd]>, VEX_WIG;
+ VEX, Sched<[WriteCvtPS2ILd]>, WIG;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(v8i32 (X86any_cvttp2si (v8f32 VR256:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPS2IY]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtPS2IY]>, WIG;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(v8i32 (X86any_cvttp2si (loadv8f32 addr:$src))))]>,
VEX, VEX_L,
- Sched<[WriteCvtPS2IYLd]>, VEX_WIG;
+ Sched<[WriteCvtPS2IYLd]>, WIG;
}
def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
@@ -1650,24 +1650,24 @@ def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86any_cvttp2si (v2f64 VR128:$src))))]>,
- VEX, Sched<[WriteCvtPD2I]>, VEX_WIG;
+ VEX, Sched<[WriteCvtPD2I]>, WIG;
def VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttpd2dq{x}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86any_cvttp2si (loadv2f64 addr:$src))))]>,
- VEX, Sched<[WriteCvtPD2ILd]>, VEX_WIG;
+ VEX, Sched<[WriteCvtPD2ILd]>, WIG;
// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86any_cvttp2si (v4f64 VR256:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPD2IY]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtPD2IY]>, WIG;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v4i32 (X86any_cvttp2si (loadv4f64 addr:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtPD2IYLd]>, WIG;
} // Predicates = [HasAVX, NoVLX]
def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
@@ -1699,19 +1699,19 @@ let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (X86any_vfpext (v4f32 VR128:$src))))]>,
- PS, VEX, Sched<[WriteCvtPS2PD]>, VEX_WIG;
+ PS, VEX, Sched<[WriteCvtPS2PD]>, WIG;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
- PS, VEX, Sched<[WriteCvtPS2PD.Folded]>, VEX_WIG;
+ PS, VEX, Sched<[WriteCvtPS2PD.Folded]>, WIG;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (v4f64 (any_fpextend (v4f32 VR128:$src))))]>,
- PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, VEX_WIG;
+ PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY]>, WIG;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (v4f64 (extloadv4f32 addr:$src)))]>,
- PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, VEX_WIG;
+ PS, VEX, VEX_L, Sched<[WriteCvtPS2PDY.Folded]>, WIG;
}
let Predicates = [UseSSE2], Uses = [MXCSR], mayRaiseFPException = 1 in {
@@ -1735,23 +1735,23 @@ def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
(bc_v4i32
(v2i64 (scalar_to_vector
(loadi64 addr:$src)))))))]>,
- VEX, Sched<[WriteCvtI2PDLd]>, VEX_WIG;
+ VEX, Sched<[WriteCvtI2PDLd]>, WIG;
def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2f64 (X86any_VSintToFP (v4i32 VR128:$src))))]>,
- VEX, Sched<[WriteCvtI2PD]>, VEX_WIG;
+ VEX, Sched<[WriteCvtI2PD]>, WIG;
def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(v4f64 (any_sint_to_fp (loadv4i32 addr:$src))))]>,
VEX, VEX_L, Sched<[WriteCvtI2PDYLd]>,
- VEX_WIG;
+ WIG;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(v4f64 (any_sint_to_fp (v4i32 VR128:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtI2PDY]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtI2PDY]>, WIG;
}
let hasSideEffects = 0, mayLoad = 1 in
@@ -1790,20 +1790,20 @@ let Predicates = [HasAVX, NoVLX], Uses = [MXCSR], mayRaiseFPException = 1 in {
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4f32 (X86any_vfpround (v2f64 VR128:$src))))]>,
- VEX, Sched<[WriteCvtPD2PS]>, VEX_WIG;
+ VEX, Sched<[WriteCvtPD2PS]>, WIG;
def VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2ps{x}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4f32 (X86any_vfpround (loadv2f64 addr:$src))))]>,
- VEX, Sched<[WriteCvtPD2PS.Folded]>, VEX_WIG;
+ VEX, Sched<[WriteCvtPD2PS.Folded]>, WIG;
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4f32 (X86any_vfpround (v4f64 VR256:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPD2PSY]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtPD2PSY]>, WIG;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4f32 (X86any_vfpround (loadv4f64 addr:$src))))]>,
- VEX, VEX_L, Sched<[WriteCvtPD2PSY.Folded]>, VEX_WIG;
+ VEX, VEX_L, Sched<[WriteCvtPD2PSY.Folded]>, WIG;
} // Predicates = [HasAVX, NoVLX]
def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
@@ -1860,12 +1860,12 @@ let ExeDomain = SSEPackedSingle in
defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, ssmem, X86cmps, v4f32, loadf32,
"cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SchedWriteFCmpSizes.PS.Scl, sse_load_f32>,
- XS, VEX_4V, VEX_LIG, VEX_WIG;
+ XS, VEX_4V, VEX_LIG, WIG;
let ExeDomain = SSEPackedDouble in
defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, sdmem, X86cmps, v2f64, loadf64,
"cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SchedWriteFCmpSizes.PD.Scl, sse_load_f64>,
- XD, VEX_4V, VEX_LIG, VEX_WIG;
+ XD, VEX_4V, VEX_LIG, WIG;
let Constraints = "$src1 = $dst" in {
let ExeDomain = SSEPackedSingle in
@@ -1919,24 +1919,24 @@ let mayLoad = 1 in
let Defs = [EFLAGS] in {
defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
- "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
+ "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86any_fcmp, f64, f64mem, loadf64,
- "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;
+ "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
defm VCOMISS : sse12_ord_cmp<0x2F, FR32, X86strict_fcmps, f32, f32mem, loadf32,
- "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
+ "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
defm VCOMISD : sse12_ord_cmp<0x2F, FR64, X86strict_fcmps, f64, f64mem, loadf64,
- "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;
+ "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
let isCodeGenOnly = 1 in {
defm VUCOMISS : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v4f32, ssmem,
- sse_load_f32, "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
+ sse_load_f32, "ucomiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
defm VUCOMISD : sse12_ord_cmp_int<0x2E, VR128, X86ucomi, v2f64, sdmem,
- sse_load_f64, "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;
+ sse_load_f64, "ucomisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
defm VCOMISS : sse12_ord_cmp_int<0x2F, VR128, X86comi, v4f32, ssmem,
- sse_load_f32, "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, VEX_WIG;
+ sse_load_f32, "comiss", SSEPackedSingle>, PS, VEX, VEX_LIG, WIG;
defm VCOMISD : sse12_ord_cmp_int<0x2F, VR128, X86comi, v2f64, sdmem,
- sse_load_f64, "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, VEX_WIG;
+ sse_load_f64, "comisd", SSEPackedDouble>, PD, VEX, VEX_LIG, WIG;
}
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86any_fcmp, f32, f32mem, loadf32,
"ucomiss", SSEPackedSingle>, PS;
@@ -1979,16 +1979,16 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
defm VCMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, PS, VEX_4V, VEX_WIG;
+ SchedWriteFCmpSizes.PS.XMM, SSEPackedSingle, loadv4f32>, PS, VEX_4V, WIG;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, v2f64,
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, loadv2f64>, PD, VEX_4V, VEX_WIG;
+ SchedWriteFCmpSizes.PD.XMM, SSEPackedDouble, loadv2f64>, PD, VEX_4V, WIG;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, v8f32,
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, PS, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteFCmpSizes.PS.YMM, SSEPackedSingle, loadv8f32>, PS, VEX_4V, VEX_L, WIG;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, v4f64,
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
- SchedWriteFCmpSizes.PD.YMM, SSEPackedDouble, loadv4f64>, PD, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteFCmpSizes.PD.YMM, SSEPackedDouble, loadv4f64>, PD, VEX_4V, VEX_L, WIG;
let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, v4f32,
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
@@ -2076,19 +2076,19 @@ let Predicates = [HasAVX, NoVLX] in {
defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv4f32, SchedWriteFShuffle.XMM, SSEPackedSingle>,
- PS, VEX_4V, VEX_WIG;
+ PS, VEX_4V, WIG;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv8f32, SchedWriteFShuffle.YMM, SSEPackedSingle>,
- PS, VEX_4V, VEX_L, VEX_WIG;
+ PS, VEX_4V, VEX_L, WIG;
defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv2f64, SchedWriteFShuffle.XMM, SSEPackedDouble>,
- PD, VEX_4V, VEX_WIG;
+ PD, VEX_4V, WIG;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv4f64, SchedWriteFShuffle.YMM, SSEPackedDouble>,
- PD, VEX_4V, VEX_L, VEX_WIG;
+ PD, VEX_4V, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
@@ -2126,29 +2126,29 @@ multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
let Predicates = [HasAVX, NoVLX] in {
defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, load,
VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, VEX_WIG;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, WIG;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, load,
VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD, VEX_4V, VEX_WIG;
+ SchedWriteFShuffle.XMM, SSEPackedDouble, 1>, PD, VEX_4V, WIG;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, load,
VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, VEX_WIG;
+ SchedWriteFShuffle.XMM, SSEPackedSingle>, PS, VEX_4V, WIG;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, load,
VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.XMM, SSEPackedDouble>, PD, VEX_4V, VEX_WIG;
+ SchedWriteFShuffle.XMM, SSEPackedDouble>, PD, VEX_4V, WIG;
defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, load,
VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, WIG;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, load,
VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, WIG;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, load,
VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteFShuffle.YMM, SSEPackedSingle>, PS, VEX_4V, VEX_L, WIG;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, load,
VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteFShuffle.YMM, SSEPackedDouble>, PD, VEX_4V, VEX_L, WIG;
}// Predicates = [HasAVX, NoVLX]
let Constraints = "$src1 = $dst" in {
@@ -2208,13 +2208,13 @@ multiclass sse12_extr_sign_mask<RegisterClass RC, ValueType vt,
let Predicates = [HasAVX] in {
defm VMOVMSKPS : sse12_extr_sign_mask<VR128, v4f32, "movmskps",
- SSEPackedSingle>, PS, VEX, VEX_WIG;
+ SSEPackedSingle>, PS, VEX, WIG;
defm VMOVMSKPD : sse12_extr_sign_mask<VR128, v2f64, "movmskpd",
- SSEPackedDouble>, PD, VEX, VEX_WIG;
+ SSEPackedDouble>, PD, VEX, WIG;
defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, v8f32, "movmskps",
- SSEPackedSingle>, PS, VEX, VEX_L, VEX_WIG;
+ SSEPackedSingle>, PS, VEX, VEX_L, WIG;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, v4f64, "movmskpd",
- SSEPackedDouble>, PD, VEX, VEX_L, VEX_WIG;
+ SSEPackedDouble>, PD, VEX, VEX_L, WIG;
// Also support integer VTs to avoid a int->fp bitcast in the DAG.
def : Pat<(X86movmsk (v4i32 VR128:$src)),
@@ -2276,7 +2276,7 @@ multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
let Predicates = [HasAVX, prd] in
defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
VR128, load, i128mem, sched.XMM,
- IsCommutable, 0>, VEX_4V, VEX_WIG;
+ IsCommutable, 0>, VEX_4V, WIG;
let Constraints = "$src1 = $dst" in
defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
@@ -2285,7 +2285,7 @@ let Constraints = "$src1 = $dst" in
let Predicates = [HasAVX2, prd] in
defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
OpVT256, VR256, load, i256mem, sched.YMM,
- IsCommutable, 0>, VEX_4V, VEX_L, VEX_WIG;
+ IsCommutable, 0>, VEX_4V, VEX_L, WIG;
}
// These are ordered here for pattern ordering requirements with the fp versions
@@ -2312,19 +2312,19 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f256mem, sched.YMM,
- [], [], 0>, PS, VEX_4V, VEX_L, VEX_WIG;
+ [], [], 0>, PS, VEX_4V, VEX_L, WIG;
defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f256mem, sched.YMM,
- [], [], 0>, PD, VEX_4V, VEX_L, VEX_WIG;
+ [], [], 0>, PD, VEX_4V, VEX_L, WIG;
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, sched.XMM,
- [], [], 0>, PS, VEX_4V, VEX_WIG;
+ [], [], 0>, PS, VEX_4V, WIG;
defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem, sched.XMM,
- [], [], 0>, PD, VEX_4V, VEX_WIG;
+ [], [], 0>, PD, VEX_4V, WIG;
}
let Constraints = "$src1 = $dst" in {
@@ -2636,17 +2636,17 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Predicates = [HasAVX, NoVLX] in {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
VR128, v4f32, f128mem, loadv4f32,
- SSEPackedSingle, sched.PS.XMM, 0>, PS, VEX_4V, VEX_WIG;
+ SSEPackedSingle, sched.PS.XMM, 0>, PS, VEX_4V, WIG;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
VR128, v2f64, f128mem, loadv2f64,
- SSEPackedDouble, sched.PD.XMM, 0>, PD, VEX_4V, VEX_WIG;
+ SSEPackedDouble, sched.PD.XMM, 0>, PD, VEX_4V, WIG;
defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
OpNode, VR256, v8f32, f256mem, loadv8f32,
- SSEPackedSingle, sched.PS.YMM, 0>, PS, VEX_4V, VEX_L, VEX_WIG;
+ SSEPackedSingle, sched.PS.YMM, 0>, PS, VEX_4V, VEX_L, WIG;
defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
OpNode, VR256, v4f64, f256mem, loadv4f64,
- SSEPackedDouble, sched.PD.YMM, 0>, PD, VEX_4V, VEX_L, VEX_WIG;
+ SSEPackedDouble, sched.PD.YMM, 0>, PD, VEX_4V, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
@@ -2665,10 +2665,10 @@ multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDPatternOperat
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
OpNode, FR32, f32mem, SSEPackedSingle, sched.PS.Scl, 0>,
- XS, VEX_4V, VEX_LIG, VEX_WIG;
+ XS, VEX_4V, VEX_LIG, WIG;
defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
OpNode, FR64, f64mem, SSEPackedDouble, sched.PD.Scl, 0>,
- XD, VEX_4V, VEX_LIG, VEX_WIG;
+ XD, VEX_4V, VEX_LIG, WIG;
let Constraints = "$src1 = $dst" in {
defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
@@ -2687,10 +2687,10 @@ multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
let Uses = [MXCSR], mayRaiseFPException = 1 in {
defm V#NAME#SS : sse12_fp_scalar_int<opc, OpNode, VR128, v4f32,
!strconcat(OpcodeStr, "ss"), ssmem, sse_load_f32,
- SSEPackedSingle, sched.PS.Scl, 0>, XS, VEX_4V, VEX_LIG, VEX_WIG;
+ SSEPackedSingle, sched.PS.Scl, 0>, XS, VEX_4V, VEX_LIG, WIG;
defm V#NAME#SD : sse12_fp_scalar_int<opc, OpNode, VR128, v2f64,
!strconcat(OpcodeStr, "sd"), sdmem, sse_load_f64,
- SSEPackedDouble, sched.PD.Scl, 0>, XD, VEX_4V, VEX_LIG, VEX_WIG;
+ SSEPackedDouble, sched.PD.Scl, 0>, XD, VEX_4V, VEX_LIG, WIG;
let Constraints = "$src1 = $dst" in {
defm SS : sse12_fp_scalar_int<opc, OpNode, VR128, v4f32,
@@ -2949,22 +2949,22 @@ let Predicates = prds in {
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>,
- VEX, Sched<[sched.XMM]>, VEX_WIG;
+ VEX, Sched<[sched.XMM]>, WIG;
def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))]>,
- VEX, Sched<[sched.XMM.Folded]>, VEX_WIG;
+ VEX, Sched<[sched.XMM.Folded]>, WIG;
def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>,
- VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
+ VEX, VEX_L, Sched<[sched.YMM]>, WIG;
def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat("v", OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))]>,
- VEX, VEX_L, Sched<[sched.YMM.Folded]>, VEX_WIG;
+ VEX, VEX_L, Sched<[sched.YMM.Folded]>, WIG;
}
def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
@@ -2985,22 +2985,22 @@ let Predicates = [HasAVX, NoVLX] in {
!strconcat("v", OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>,
- VEX, Sched<[sched.XMM]>, VEX_WIG;
+ VEX, Sched<[sched.XMM]>, WIG;
def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat("v", OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))]>,
- VEX, Sched<[sched.XMM.Folded]>, VEX_WIG;
+ VEX, Sched<[sched.XMM.Folded]>, WIG;
def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
!strconcat("v", OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>,
- VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
+ VEX, VEX_L, Sched<[sched.YMM]>, WIG;
def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
!strconcat("v", OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
[(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))]>,
- VEX, VEX_L, Sched<[sched.YMM.Folded]>, VEX_WIG;
+ VEX, VEX_L, Sched<[sched.YMM.Folded]>, WIG;
}
def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
@@ -3020,7 +3020,7 @@ multiclass sse1_fp_unop_s_intr<string OpcodeStr, Predicate AVXTarget> {
defm V#NAME#SS : avx_fp_unop_s_intr<v4f32, sse_load_f32,
!cast<Intrinsic>("int_x86_sse_"#OpcodeStr#_ss),
AVXTarget>,
- XS, VEX_4V, VEX_LIG, VEX_WIG;
+ XS, VEX_4V, VEX_LIG, WIG;
}
multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
@@ -3029,7 +3029,7 @@ multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNod
ssmem, OpNode, SSEPackedSingle, sched.Scl, UseSSE1>, XS;
defm V#NAME#SS : avx_fp_unop_s<opc, "v"#OpcodeStr#ss, FR32, f32,
f32mem, ssmem, OpNode, SSEPackedSingle, sched.Scl, AVXTarget>,
- XS, VEX_4V, VEX_LIG, VEX_WIG;
+ XS, VEX_4V, VEX_LIG, WIG;
}
multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
@@ -3038,7 +3038,7 @@ multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, SDPatternOperator OpNod
sdmem, OpNode, SSEPackedDouble, sched.Scl, UseSSE2>, XD;
defm V#NAME#SD : avx_fp_unop_s<opc, "v"#OpcodeStr#sd, FR64, f64,
f64mem, sdmem, OpNode, SSEPackedDouble, sched.Scl, AVXTarget>,
- XD, VEX_4V, VEX_LIG, VEX_WIG;
+ XD, VEX_4V, VEX_LIG, WIG;
}
// Square root.
@@ -3109,12 +3109,12 @@ def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
(ins f128mem:$dst, VR128:$src),
"movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f32 VR128:$src),
- addr:$dst)]>, VEX, VEX_WIG;
+ addr:$dst)]>, VEX, WIG;
def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
(ins f128mem:$dst, VR128:$src),
"movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v2f64 VR128:$src),
- addr:$dst)]>, VEX, VEX_WIG;
+ addr:$dst)]>, VEX, WIG;
} // SchedRW
let SchedRW = [SchedWriteFMoveLSNT.YMM.MR] in {
@@ -3122,12 +3122,12 @@ def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src),
"movntps\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v8f32 VR256:$src),
- addr:$dst)]>, VEX, VEX_L, VEX_WIG;
+ addr:$dst)]>, VEX, VEX_L, WIG;
def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src),
"movntpd\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4f64 VR256:$src),
- addr:$dst)]>, VEX, VEX_L, VEX_WIG;
+ addr:$dst)]>, VEX, VEX_L, WIG;
} // SchedRW
let ExeDomain = SSEPackedInt in {
@@ -3135,13 +3135,13 @@ def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
(ins i128mem:$dst, VR128:$src),
"movntdq\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v2i64 VR128:$src),
- addr:$dst)]>, VEX, VEX_WIG,
+ addr:$dst)]>, VEX, WIG,
Sched<[SchedWriteVecMoveLSNT.XMM.MR]>;
def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
(ins i256mem:$dst, VR256:$src),
"movntdq\t{$src, $dst|$dst, $src}",
[(alignednontemporalstore (v4i64 VR256:$src),
- addr:$dst)]>, VEX, VEX_L, VEX_WIG,
+ addr:$dst)]>, VEX, VEX_L, WIG,
Sched<[SchedWriteVecMoveLSNT.YMM.MR]>;
} // ExeDomain
} // Predicates
@@ -3257,11 +3257,11 @@ def : Pat<(X86MFence), (MFENCE)>;
let mayLoad=1, hasSideEffects=1, Defs=[MXCSR] in
def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
"ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>,
- VEX, Sched<[WriteLDMXCSR]>, VEX_WIG;
+ VEX, Sched<[WriteLDMXCSR]>, WIG;
let mayStore=1, hasSideEffects=1, Uses=[MXCSR] in
def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
"stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>,
- VEX, Sched<[WriteSTMXCSR]>, VEX_WIG;
+ VEX, Sched<[WriteSTMXCSR]>, WIG;
let mayLoad=1, hasSideEffects=1, Defs=[MXCSR] in
def LDMXCSR : I<0xAE, MRM2m, (outs), (ins i32mem:$src),
@@ -3281,16 +3281,16 @@ let ExeDomain = SSEPackedInt in { // SSE integer instructions
let hasSideEffects = 0 in {
def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
- Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, WIG;
def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
- Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.XMM.RR]>, VEX, WIG;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
- Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, WIG;
def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
- Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.YMM.RR]>, VEX, VEX_L, WIG;
}
// For Disassembler
@@ -3298,19 +3298,19 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.XMM.RR]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.YMM.RR]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.XMM.RR]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
"movdqu\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.YMM.RR]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
@@ -3318,20 +3318,20 @@ let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (alignedloadv2i64 addr:$src))]>,
- Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, WIG;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.YMM.RM]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (loadv2i64 addr:$src))]>,
Sched<[SchedWriteVecMoveLS.XMM.RM]>,
- XS, VEX, VEX_WIG;
+ XS, VEX, WIG;
def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovdqu\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLS.YMM.RM]>,
- XS, VEX, VEX_L, VEX_WIG;
+ XS, VEX, VEX_L, WIG;
}
let mayStore = 1, hasSideEffects = 0, Predicates = [HasAVX,NoVLX] in {
@@ -3339,18 +3339,18 @@ def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
(ins i128mem:$dst, VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[(alignedstore (v2i64 VR128:$src), addr:$dst)]>,
- Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.XMM.MR]>, VEX, WIG;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
(ins i256mem:$dst, VR256:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>,
- Sched<[SchedWriteVecMoveLS.YMM.MR]>, VEX, VEX_L, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.YMM.MR]>, VEX, VEX_L, WIG;
def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",
[(store (v2i64 VR128:$src), addr:$dst)]>,
- Sched<[SchedWriteVecMoveLS.XMM.MR]>, XS, VEX, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.XMM.MR]>, XS, VEX, WIG;
def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
"vmovdqu\t{$src, $dst|$dst, $src}",[]>,
- Sched<[SchedWriteVecMoveLS.YMM.MR]>, XS, VEX, VEX_L, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.YMM.MR]>, XS, VEX, VEX_L, WIG;
}
let SchedRW = [SchedWriteVecMoveLS.XMM.RR] in {
@@ -3537,12 +3537,12 @@ defm PMULUDQ : PDI_binop_all<0xF4, "pmuludq", X86pmuludq, v2i64, v4i64,
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
defm VPMADDWD : PDI_binop_rm2<0xF5, "vpmaddwd", X86vpmaddwd, v4i32, v8i16, VR128,
load, i128mem, SchedWriteVecIMul.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
defm VPMADDWDY : PDI_binop_rm2<0xF5, "vpmaddwd", X86vpmaddwd, v8i32, v16i16,
VR256, load, i256mem, SchedWriteVecIMul.YMM,
- 0>, VEX_4V, VEX_L, VEX_WIG;
+ 0>, VEX_4V, VEX_L, WIG;
let Constraints = "$src1 = $dst" in
defm PMADDWD : PDI_binop_rm2<0xF5, "pmaddwd", X86vpmaddwd, v4i32, v8i16, VR128,
memop, i128mem, SchedWriteVecIMul.XMM>;
@@ -3550,11 +3550,11 @@ defm PMADDWD : PDI_binop_rm2<0xF5, "pmaddwd", X86vpmaddwd, v4i32, v8i16, VR128,
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
defm VPSADBW : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v2i64, v16i8, VR128,
load, i128mem, SchedWritePSADBW.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
defm VPSADBWY : PDI_binop_rm2<0xF6, "vpsadbw", X86psadbw, v4i64, v32i8, VR256,
load, i256mem, SchedWritePSADBW.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
let Constraints = "$src1 = $dst" in
defm PSADBW : PDI_binop_rm2<0xF6, "psadbw", X86psadbw, v2i64, v16i8, VR128,
memop, i128mem, SchedWritePSADBW.XMM>;
@@ -3604,12 +3604,12 @@ multiclass PDI_binop_rmi_all<bits<8> opc, bits<8> opc2, Format ImmForm,
let Predicates = [HasAVX, prd] in
defm V#NAME : PDI_binop_rmi<opc, opc2, ImmForm, !strconcat("v", OpcodeStr),
OpNode, OpNode2, VR128, sched.XMM, schedImm.XMM,
- DstVT128, SrcVT, load, 0>, VEX_4V, VEX_WIG;
+ DstVT128, SrcVT, load, 0>, VEX_4V, WIG;
let Predicates = [HasAVX2, prd] in
defm V#NAME#Y : PDI_binop_rmi<opc, opc2, ImmForm, !strconcat("v", OpcodeStr),
OpNode, OpNode2, VR256, sched.YMM, schedImm.YMM,
DstVT256, SrcVT, load, 0>, VEX_4V, VEX_L,
- VEX_WIG;
+ WIG;
let Constraints = "$src1 = $dst" in
defm NAME : PDI_binop_rmi<opc, opc2, ImmForm, OpcodeStr, OpNode, OpNode2,
VR128, sched.XMM, schedImm.XMM, DstVT128, SrcVT,
@@ -3631,11 +3631,11 @@ multiclass PDI_binop_ri_all<bits<8> opc, Format ImmForm, string OpcodeStr,
SDNode OpNode, X86SchedWriteWidths sched> {
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
defm V#NAME : PDI_binop_ri<opc, ImmForm, !strconcat("v", OpcodeStr), OpNode,
- VR128, v16i8, sched.XMM, 0>, VEX_4V, VEX_WIG;
+ VR128, v16i8, sched.XMM, 0>, VEX_4V, WIG;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
defm V#NAME#Y : PDI_binop_ri<opc, ImmForm, !strconcat("v", OpcodeStr), OpNode,
VR256, v32i8, sched.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
let Constraints = "$src1 = $dst" in
defm NAME : PDI_binop_ri<opc, ImmForm, OpcodeStr, OpNode, VR128, v16i8,
sched.XMM>;
@@ -3707,7 +3707,7 @@ let Predicates = [HasAVX, prd] in {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode VR128:$src1, (i8 timm:$src2))))]>,
- VEX, Sched<[sched.XMM]>, VEX_WIG;
+ VEX, Sched<[sched.XMM]>, WIG;
def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
@@ -3715,7 +3715,7 @@ let Predicates = [HasAVX, prd] in {
[(set VR128:$dst,
(vt128 (OpNode (load addr:$src1),
(i8 timm:$src2))))]>, VEX,
- Sched<[sched.XMM.Folded]>, VEX_WIG;
+ Sched<[sched.XMM.Folded]>, WIG;
}
let Predicates = [HasAVX2, prd] in {
@@ -3725,7 +3725,7 @@ let Predicates = [HasAVX2, prd] in {
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR256:$dst,
(vt256 (OpNode VR256:$src1, (i8 timm:$src2))))]>,
- VEX, VEX_L, Sched<[sched.YMM]>, VEX_WIG;
+ VEX, VEX_L, Sched<[sched.YMM]>, WIG;
def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
(ins i256mem:$src1, u8imm:$src2),
!strconcat("v", OpcodeStr,
@@ -3733,7 +3733,7 @@ let Predicates = [HasAVX2, prd] in {
[(set VR256:$dst,
(vt256 (OpNode (load addr:$src1),
(i8 timm:$src2))))]>, VEX, VEX_L,
- Sched<[sched.YMM.Folded]>, VEX_WIG;
+ Sched<[sched.YMM.Folded]>, WIG;
}
let Predicates = [UseSSE2] in {
@@ -3821,33 +3821,33 @@ multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
defm VPACKSSWBY : sse2_pack<0x63, "vpacksswb", v32i8, v16i16, X86Packss, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPACKSSDWY : sse2_pack<0x6B, "vpackssdw", v16i16, v8i32, X86Packss, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPACKUSWBY : sse2_pack<0x67, "vpackuswb", v32i8, v16i16, X86Packus, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPACKUSDWY : sse4_pack<0x2B, "vpackusdw", v16i16, v8i32, X86Packus, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
@@ -3892,61 +3892,61 @@ multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
}
let Predicates = [HasAVX, NoVLX] in {
defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh, VR128,
i128mem, SchedWriteShuffle.XMM, load, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
defm VPUNPCKLBWY : sse2_unpack<0x60, "vpunpcklbw", v32i8, X86Unpckl, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPUNPCKLWDY : sse2_unpack<0x61, "vpunpcklwd", v16i16, X86Unpckl, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPUNPCKHBWY : sse2_unpack<0x68, "vpunpckhbw", v32i8, X86Unpckh, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPUNPCKHWDY : sse2_unpack<0x69, "vpunpckhwd", v16i16, X86Unpckh, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
}
let Predicates = [HasAVX2, NoVLX] in {
defm VPUNPCKLDQY : sse2_unpack<0x62, "vpunpckldq", v8i32, X86Unpckl, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPUNPCKLQDQY : sse2_unpack<0x6C, "vpunpcklqdq", v4i64, X86Unpckl, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPUNPCKHDQY : sse2_unpack<0x6A, "vpunpckhdq", v8i32, X86Unpckh, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPUNPCKHQDQY : sse2_unpack<0x6D, "vpunpckhqdq", v4i64, X86Unpckh, VR256,
i256mem, SchedWriteShuffle.YMM, load, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
@@ -4004,7 +4004,7 @@ def VPEXTRWrr : Ii8<0xC5, MRMSrcReg,
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
timm:$src2))]>,
- PD, VEX, VEX_WIG, Sched<[WriteVecExtract]>;
+ PD, VEX, WIG, Sched<[WriteVecExtract]>;
def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
(outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -4014,7 +4014,7 @@ def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
// Insert
let Predicates = [HasAVX, NoBWI] in
-defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V, VEX_WIG;
+defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V, WIG;
let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
defm PINSRW : sse2_pinsrw, PD;
@@ -4045,14 +4045,14 @@ def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
[(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))]>,
- Sched<[WriteVecMOVMSK]>, VEX, VEX_WIG;
+ Sched<[WriteVecMOVMSK]>, VEX, WIG;
let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
[(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))]>,
- Sched<[WriteVecMOVMSKY]>, VEX, VEX_L, VEX_WIG;
+ Sched<[WriteVecMOVMSKY]>, VEX, VEX_L, WIG;
}
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
@@ -4075,13 +4075,13 @@ def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
[(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
let Uses = [EDI], Predicates = [UseSSE2] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
@@ -4307,7 +4307,7 @@ def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
- VEX, Requires<[UseAVX]>, VEX_WIG;
+ VEX, Requires<[UseAVX]>, WIG;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -4323,7 +4323,7 @@ def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (extractelt (v2i64 VR128:$src),
(iPTR 0))), addr:$dst)]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (extractelt (v2i64 VR128:$src),
@@ -4334,7 +4334,7 @@ def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
SchedRW = [SchedWriteVecLogic.XMM] in {
def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_WIG;
+ "movq\t{$src, $dst|$dst, $src}", []>, VEX, WIG;
def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}", []>;
}
@@ -4369,7 +4369,7 @@ let ExeDomain = SSEPackedInt, SchedRW = [SchedWriteVecLogic.XMM] in {
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
- XS, VEX, Requires<[UseAVX]>, VEX_WIG;
+ XS, VEX, Requires<[UseAVX]>, WIG;
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
@@ -4418,16 +4418,16 @@ def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
let Predicates = [HasAVX, NoVLX] in {
defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
v4f32, VR128, loadv4f32, f128mem,
- SchedWriteFShuffle.XMM>, VEX, VEX_WIG;
+ SchedWriteFShuffle.XMM>, VEX, WIG;
defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
v4f32, VR128, loadv4f32, f128mem,
- SchedWriteFShuffle.XMM>, VEX, VEX_WIG;
+ SchedWriteFShuffle.XMM>, VEX, WIG;
defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
v8f32, VR256, loadv8f32, f256mem,
- SchedWriteFShuffle.YMM>, VEX, VEX_L, VEX_WIG;
+ SchedWriteFShuffle.YMM>, VEX, VEX_L, WIG;
defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
v8f32, VR256, loadv8f32, f256mem,
- SchedWriteFShuffle.YMM>, VEX, VEX_L, VEX_WIG;
+ SchedWriteFShuffle.YMM>, VEX, VEX_L, WIG;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
memopv4f32, f128mem, SchedWriteFShuffle.XMM>;
@@ -4496,9 +4496,9 @@ def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
let Predicates = [HasAVX, NoVLX] in {
defm VMOVDDUP : sse3_replicate_dfp<"vmovddup", SchedWriteFShuffle>,
- VEX, VEX_WIG;
+ VEX, WIG;
defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup", SchedWriteFShuffle>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
defm MOVDDUP : sse3_replicate_dfp<"movddup", SchedWriteFShuffle>;
@@ -4522,11 +4522,11 @@ let Predicates = [HasAVX] in {
def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vlddqu\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>,
- Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.XMM.RM]>, VEX, WIG;
def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vlddqu\t{$src, $dst|$dst, $src}",
[(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
- Sched<[SchedWriteVecMoveLS.YMM.RM]>, VEX, VEX_L, VEX_WIG;
+ Sched<[SchedWriteVecMoveLS.YMM.RM]>, VEX, VEX_L, WIG;
} // Predicates
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
@@ -4563,18 +4563,18 @@ let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VADDSUBPS : sse3_addsub<"vaddsubps", v4f32, VR128, f128mem,
SchedWriteFAddSizes.PS.XMM, loadv4f32, 0>,
- XD, VEX_4V, VEX_WIG;
+ XD, VEX_4V, WIG;
defm VADDSUBPSY : sse3_addsub<"vaddsubps", v8f32, VR256, f256mem,
SchedWriteFAddSizes.PS.YMM, loadv8f32, 0>,
- XD, VEX_4V, VEX_L, VEX_WIG;
+ XD, VEX_4V, VEX_L, WIG;
}
let ExeDomain = SSEPackedDouble in {
defm VADDSUBPD : sse3_addsub<"vaddsubpd", v2f64, VR128, f128mem,
SchedWriteFAddSizes.PD.XMM, loadv2f64, 0>,
- PD, VEX_4V, VEX_WIG;
+ PD, VEX_4V, WIG;
defm VADDSUBPDY : sse3_addsub<"vaddsubpd", v4f64, VR256, f256mem,
SchedWriteFAddSizes.PD.YMM, loadv4f64, 0>,
- PD, VEX_4V, VEX_L, VEX_WIG;
+ PD, VEX_4V, VEX_L, WIG;
}
}
let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
@@ -4635,23 +4635,23 @@ let Uses = [MXCSR], mayRaiseFPException = 1 in {
let Predicates = [HasAVX] in {
let ExeDomain = SSEPackedSingle in {
defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
- X86fhadd, WriteFHAdd, loadv4f32, 0>, VEX_4V, VEX_WIG;
+ X86fhadd, WriteFHAdd, loadv4f32, 0>, VEX_4V, WIG;
defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
- X86fhsub, WriteFHAdd, loadv4f32, 0>, VEX_4V, VEX_WIG;
+ X86fhsub, WriteFHAdd, loadv4f32, 0>, VEX_4V, WIG;
defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
- X86fhadd, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, VEX_WIG;
+ X86fhadd, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, WIG;
defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
- X86fhsub, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, VEX_WIG;
+ X86fhsub, WriteFHAddY, loadv8f32, 0>, VEX_4V, VEX_L, WIG;
}
let ExeDomain = SSEPackedDouble in {
defm VHADDPD : S3_Int<0x7C, "vhaddpd", v2f64, VR128, f128mem,
- X86fhadd, WriteFHAdd, loadv2f64, 0>, VEX_4V, VEX_WIG;
+ X86fhadd, WriteFHAdd, loadv2f64, 0>, VEX_4V, WIG;
defm VHSUBPD : S3_Int<0x7D, "vhsubpd", v2f64, VR128, f128mem,
- X86fhsub, WriteFHAdd, loadv2f64, 0>, VEX_4V, VEX_WIG;
+ X86fhsub, WriteFHAdd, loadv2f64, 0>, VEX_4V, WIG;
defm VHADDPDY : S3_Int<0x7C, "vhaddpd", v4f64, VR256, f256mem,
- X86fhadd, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, VEX_WIG;
+ X86fhadd, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, WIG;
defm VHSUBPDY : S3_Int<0x7D, "vhsubpd", v4f64, VR256, f256mem,
- X86fhsub, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, VEX_WIG;
+ X86fhsub, WriteFHAddY, loadv4f64, 0>, VEX_4V, VEX_L, WIG;
}
}
@@ -4710,23 +4710,23 @@ multiclass SS3I_unop_rm_y<bits<8> opc, string OpcodeStr, ValueType vt,
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPABSB : SS3I_unop_rm<0x1C, "vpabsb", v16i8, abs, SchedWriteVecALU,
- load>, VEX, VEX_WIG;
+ load>, VEX, WIG;
defm VPABSW : SS3I_unop_rm<0x1D, "vpabsw", v8i16, abs, SchedWriteVecALU,
- load>, VEX, VEX_WIG;
+ load>, VEX, WIG;
}
let Predicates = [HasAVX, NoVLX] in {
defm VPABSD : SS3I_unop_rm<0x1E, "vpabsd", v4i32, abs, SchedWriteVecALU,
- load>, VEX, VEX_WIG;
+ load>, VEX, WIG;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
defm VPABSB : SS3I_unop_rm_y<0x1C, "vpabsb", v32i8, abs, SchedWriteVecALU>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
defm VPABSW : SS3I_unop_rm_y<0x1D, "vpabsw", v16i16, abs, SchedWriteVecALU>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
let Predicates = [HasAVX2, NoVLX] in {
defm VPABSD : SS3I_unop_rm_y<0x1E, "vpabsd", v8i32, abs, SchedWriteVecALU>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
defm PABSB : SS3I_unop_rm<0x1C, "pabsb", v16i8, abs, SchedWriteVecALU,
@@ -4806,45 +4806,45 @@ let ImmT = NoImm, Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
let isCommutable = 0 in {
defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, v16i8,
VR128, load, i128mem,
- SchedWriteVarShuffle.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWriteVarShuffle.XMM, 0>, VEX_4V, WIG;
defm VPMADDUBSW : SS3I_binop_rm<0x04, "vpmaddubsw", X86vpmaddubsw, v8i16,
v16i8, VR128, load, i128mem,
- SchedWriteVecIMul.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWriteVecIMul.XMM, 0>, VEX_4V, WIG;
}
defm VPMULHRSW : SS3I_binop_rm<0x0B, "vpmulhrsw", X86mulhrs, v8i16, v8i16,
VR128, load, i128mem,
- SchedWriteVecIMul.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWriteVecIMul.XMM, 0>, VEX_4V, WIG;
}
let ImmT = NoImm, Predicates = [HasAVX] in {
let isCommutable = 0 in {
defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, v8i16, VR128,
load, i128mem,
- SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWritePHAdd.XMM, 0>, VEX_4V, WIG;
defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, v4i32, VR128,
load, i128mem,
- SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWritePHAdd.XMM, 0>, VEX_4V, WIG;
defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, v8i16, VR128,
load, i128mem,
- SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWritePHAdd.XMM, 0>, VEX_4V, WIG;
defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, v4i32, VR128,
load, i128mem,
- SchedWritePHAdd.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWritePHAdd.XMM, 0>, VEX_4V, WIG;
defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb",
int_x86_ssse3_psign_b_128,
- SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
+ SchedWriteVecALU.XMM, load, 0>, VEX_4V, WIG;
defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw",
int_x86_ssse3_psign_w_128,
- SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
+ SchedWriteVecALU.XMM, load, 0>, VEX_4V, WIG;
defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd",
int_x86_ssse3_psign_d_128,
- SchedWriteVecALU.XMM, load, 0>, VEX_4V, VEX_WIG;
+ SchedWriteVecALU.XMM, load, 0>, VEX_4V, WIG;
defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
int_x86_ssse3_phadd_sw_128,
- SchedWritePHAdd.XMM, load, 0>, VEX_4V, VEX_WIG;
+ SchedWritePHAdd.XMM, load, 0>, VEX_4V, WIG;
defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
int_x86_ssse3_phsub_sw_128,
- SchedWritePHAdd.XMM, load, 0>, VEX_4V, VEX_WIG;
+ SchedWritePHAdd.XMM, load, 0>, VEX_4V, WIG;
}
}
@@ -4852,42 +4852,42 @@ let ImmT = NoImm, Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
let isCommutable = 0 in {
defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, v32i8,
VR256, load, i256mem,
- SchedWriteVarShuffle.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteVarShuffle.YMM, 0>, VEX_4V, VEX_L, WIG;
defm VPMADDUBSWY : SS3I_binop_rm<0x04, "vpmaddubsw", X86vpmaddubsw, v16i16,
v32i8, VR256, load, i256mem,
- SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, WIG;
}
defm VPMULHRSWY : SS3I_binop_rm<0x0B, "vpmulhrsw", X86mulhrs, v16i16, v16i16,
VR256, load, i256mem,
- SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteVecIMul.YMM, 0>, VEX_4V, VEX_L, WIG;
}
let ImmT = NoImm, Predicates = [HasAVX2] in {
let isCommutable = 0 in {
defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, v16i16,
VR256, load, i256mem,
- SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, WIG;
defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, v8i32, VR256,
load, i256mem,
- SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, WIG;
defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, v16i16,
VR256, load, i256mem,
- SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, WIG;
defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, v8i32, VR256,
load, i256mem,
- SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWritePHAdd.YMM, 0>, VEX_4V, VEX_L, WIG;
defm VPSIGNB : SS3I_binop_rm_int_y<0x08, "vpsignb", int_x86_avx2_psign_b,
- SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteVecALU.YMM>, VEX_4V, VEX_L, WIG;
defm VPSIGNW : SS3I_binop_rm_int_y<0x09, "vpsignw", int_x86_avx2_psign_w,
- SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteVecALU.YMM>, VEX_4V, VEX_L, WIG;
defm VPSIGND : SS3I_binop_rm_int_y<0x0A, "vpsignd", int_x86_avx2_psign_d,
- SchedWriteVecALU.YMM>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteVecALU.YMM>, VEX_4V, VEX_L, WIG;
defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
int_x86_avx2_phadd_sw,
- SchedWritePHAdd.YMM>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWritePHAdd.YMM>, VEX_4V, VEX_L, WIG;
defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
int_x86_avx2_phsub_sw,
- SchedWritePHAdd.YMM>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWritePHAdd.YMM>, VEX_4V, VEX_L, WIG;
}
}
@@ -4956,10 +4956,10 @@ multiclass ssse3_palignr<string asm, ValueType VT, RegisterClass RC,
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in
defm VPALIGNR : ssse3_palignr<"vpalignr", v16i8, VR128, load, i128mem,
- SchedWriteShuffle.XMM, 0>, VEX_4V, VEX_WIG;
+ SchedWriteShuffle.XMM, 0>, VEX_4V, WIG;
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in
defm VPALIGNRY : ssse3_palignr<"vpalignr", v32i8, VR256, load, i256mem,
- SchedWriteShuffle.YMM, 0>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteShuffle.YMM, 0>, VEX_4V, VEX_L, WIG;
let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
defm PALIGNR : ssse3_palignr<"palignr", v16i8, VR128, memop, i128mem,
SchedWriteShuffle.XMM>;
@@ -5014,11 +5014,11 @@ multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX, prd] in
defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
VR128, VR128, SchedWriteVecExtend.XMM>,
- VEX, VEX_WIG;
+ VEX, WIG;
let Predicates = [HasAVX2, prd] in
defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
VR256, VR128, SchedWriteVecExtend.YMM>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
@@ -5238,7 +5238,7 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
}
let Predicates = [HasAVX, NoBWI] in
- defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX, VEX_WIG;
+ defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX, WIG;
defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
@@ -5262,7 +5262,7 @@ multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
}
let Predicates = [HasAVX, NoBWI] in
- defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX, VEX_WIG;
+ defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX, WIG;
defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
@@ -5337,7 +5337,7 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
let ExeDomain = SSEPackedSingle in {
let Predicates = [UseAVX] in
- defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX, VEX_WIG;
+ defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX, WIG;
defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
}
@@ -5367,7 +5367,7 @@ multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
}
let Predicates = [HasAVX, NoBWI] in {
- defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V, VEX_WIG;
+ defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V, WIG;
def : Pat<(X86pinsrb VR128:$src1, (i32 (anyext (i8 GR8:$src2))), timm:$src3),
(VPINSRBrr VR128:$src1, (INSERT_SUBREG (i32 (IMPLICIT_DEF)),
GR8:$src2, sub_8bit), timm:$src3)>;
@@ -5459,7 +5459,7 @@ multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
let ExeDomain = SSEPackedSingle in {
let Predicates = [UseAVX] in
defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
let Constraints = "$src1 = $dst" in
defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1>;
}
@@ -5620,27 +5620,27 @@ let Predicates = [HasAVX, NoVLX] in {
// Intrinsic form
defm VROUNDPS : sse41_fp_unop_p<0x08, "vroundps", f128mem, VR128, v4f32,
loadv4f32, X86any_VRndScale, SchedWriteFRnd.XMM>,
- VEX, VEX_WIG;
+ VEX, WIG;
defm VROUNDPSY : sse41_fp_unop_p<0x08, "vroundps", f256mem, VR256, v8f32,
loadv8f32, X86any_VRndScale, SchedWriteFRnd.YMM>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
let ExeDomain = SSEPackedDouble, Uses = [MXCSR], mayRaiseFPException = 1 in {
defm VROUNDPD : sse41_fp_unop_p<0x09, "vroundpd", f128mem, VR128, v2f64,
loadv2f64, X86any_VRndScale, SchedWriteFRnd.XMM>,
- VEX, VEX_WIG;
+ VEX, WIG;
defm VROUNDPDY : sse41_fp_unop_p<0x09, "vroundpd", f256mem, VR256, v4f64,
loadv4f64, X86any_VRndScale, SchedWriteFRnd.YMM>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
}
let Predicates = [UseAVX] in {
defm VROUND : sse41_fp_binop_s<0x0A, 0x0B, "vround", SchedWriteFRnd.Scl,
v4f32, v2f64, X86RndScales, 0>,
- VEX_4V, VEX_LIG, VEX_WIG, SIMD_EXC;
+ VEX_4V, VEX_LIG, WIG, SIMD_EXC;
defm VROUND : avx_fp_unop_rm<0x0A, 0x0B, "vround", SchedWriteFRnd.Scl>,
- VEX_4V, VEX_LIG, VEX_WIG, SIMD_EXC;
+ VEX_4V, VEX_LIG, WIG, SIMD_EXC;
}
let Predicates = [UseAVX] in {
@@ -5694,22 +5694,22 @@ let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
- Sched<[SchedWriteVecTest.XMM]>, VEX, VEX_WIG;
+ Sched<[SchedWriteVecTest.XMM]>, VEX, WIG;
def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
Sched<[SchedWriteVecTest.XMM.Folded, SchedWriteVecTest.XMM.ReadAfterFold]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
- Sched<[SchedWriteVecTest.YMM]>, VEX, VEX_L, VEX_WIG;
+ Sched<[SchedWriteVecTest.YMM]>, VEX, VEX_L, WIG;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
"vptest\t{$src2, $src1|$src1, $src2}",
[(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
Sched<[SchedWriteVecTest.YMM.Folded, SchedWriteVecTest.YMM.ReadAfterFold]>,
- VEX, VEX_L, VEX_WIG;
+ VEX, VEX_L, WIG;
}
let Defs = [EFLAGS] in {
@@ -5811,7 +5811,7 @@ multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "vphminposuw",
X86phminpos, load,
- WritePHMINPOS>, VEX, VEX_WIG;
+ WritePHMINPOS>, VEX, WIG;
defm PHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "phminposuw",
X86phminpos, memop,
WritePHMINPOS>;
@@ -5842,65 +5842,65 @@ multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Predicates = [HasAVX, NoVLX] in {
defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", smin, v4i32, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", umin, v4i32, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v4i32, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", umax, v4i32, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPMULDQ : SS48I_binop_rm<0x28, "vpmuldq", X86pmuldq, v2i64, VR128,
load, i128mem, SchedWriteVecIMul.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
}
let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", smin, v16i8, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", umin, v8i16, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v16i8, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v8i16, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
}
let Predicates = [HasAVX2, NoVLX] in {
defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", smin, v8i32, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", umin, v8i32, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", smax, v8i32, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", umax, v8i32, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPMULDQY : SS48I_binop_rm<0x28, "vpmuldq", X86pmuldq, v4i64, VR256,
load, i256mem, SchedWriteVecIMul.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
}
let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", smin, v32i8, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", umin, v16i16, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", smax, v32i8, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", umax, v16i16, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
@@ -5927,20 +5927,20 @@ let Constraints = "$src1 = $dst" in {
let Predicates = [HasAVX, NoVLX] in
defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
load, i128mem, SchedWritePMULLD.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
let Predicates = [HasAVX] in
defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
let Predicates = [HasAVX2, NoVLX] in
defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
load, i256mem, SchedWritePMULLD.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
let Predicates = [HasAVX2] in
defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
let Constraints = "$src1 = $dst" in {
defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
@@ -6088,22 +6088,22 @@ let Predicates = [HasAVX] in {
let isCommutable = 0 in {
defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
VR128, load, i128mem, 0,
- SchedWriteMPSAD.XMM>, VEX_4V, VEX_WIG;
+ SchedWriteMPSAD.XMM>, VEX_4V, WIG;
}
let Uses = [MXCSR], mayRaiseFPException = 1 in {
let ExeDomain = SSEPackedSingle in
defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
VR128, load, f128mem, 0,
- SchedWriteDPPS.XMM>, VEX_4V, VEX_WIG;
+ SchedWriteDPPS.XMM>, VEX_4V, WIG;
let ExeDomain = SSEPackedDouble in
defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
VR128, load, f128mem, 0,
- SchedWriteDPPD.XMM>, VEX_4V, VEX_WIG;
+ SchedWriteDPPD.XMM>, VEX_4V, WIG;
let ExeDomain = SSEPackedSingle in
defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
VR256, load, i256mem, 0,
- SchedWriteDPPS.YMM>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteDPPS.YMM>, VEX_4V, VEX_L, WIG;
}
}
@@ -6111,7 +6111,7 @@ let Predicates = [HasAVX2] in {
let isCommutable = 0 in {
defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
VR256, load, i256mem, 0,
- SchedWriteMPSAD.YMM>, VEX_4V, VEX_L, VEX_WIG;
+ SchedWriteMPSAD.YMM>, VEX_4V, VEX_L, WIG;
}
}
@@ -6170,30 +6170,30 @@ let Predicates = [HasAVX] in {
defm VBLENDPS : SS41I_blend_rmi<0x0C, "vblendps", X86Blendi, v4f32,
VR128, load, f128mem, 0, SSEPackedSingle,
SchedWriteFBlend.XMM, BlendCommuteImm4>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VBLENDPSY : SS41I_blend_rmi<0x0C, "vblendps", X86Blendi, v8f32,
VR256, load, f256mem, 0, SSEPackedSingle,
SchedWriteFBlend.YMM, BlendCommuteImm8>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VBLENDPD : SS41I_blend_rmi<0x0D, "vblendpd", X86Blendi, v2f64,
VR128, load, f128mem, 0, SSEPackedDouble,
SchedWriteFBlend.XMM, BlendCommuteImm2>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
defm VBLENDPDY : SS41I_blend_rmi<0x0D, "vblendpd", X86Blendi, v4f64,
VR256, load, f256mem, 0, SSEPackedDouble,
SchedWriteFBlend.YMM, BlendCommuteImm4>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
defm VPBLENDW : SS41I_blend_rmi<0x0E, "vpblendw", X86Blendi, v8i16,
VR128, load, i128mem, 0, SSEPackedInt,
SchedWriteBlend.XMM, BlendCommuteImm8>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
}
let Predicates = [HasAVX2] in {
defm VPBLENDWY : SS41I_blend_rmi<0x0E, "vpblendw", X86Blendi, v16i16,
VR256, load, i256mem, 0, SSEPackedInt,
SchedWriteBlend.YMM, BlendCommuteImm8>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
}
// Emulate vXi32/vXi64 blends with vXf32/vXf64 or pblendw.
@@ -6473,11 +6473,11 @@ let AddedComplexity = 400 in { // Prefer non-temporal versions
let Predicates = [HasAVX, NoVLX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}", []>,
- Sched<[SchedWriteVecMoveLSNT.XMM.RM]>, VEX, VEX_WIG;
+ Sched<[SchedWriteVecMoveLSNT.XMM.RM]>, VEX, WIG;
let Predicates = [HasAVX2, NoVLX] in
def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
"vmovntdqa\t{$src, $dst|$dst, $src}", []>,
- Sched<[SchedWriteVecMoveLSNT.YMM.RM]>, VEX, VEX_L, VEX_WIG;
+ Sched<[SchedWriteVecMoveLSNT.YMM.RM]>, VEX, VEX_L, WIG;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movntdqa\t{$src, $dst|$dst, $src}", []>,
Sched<[SchedWriteVecMoveLSNT.XMM.RM]>;
@@ -6564,12 +6564,12 @@ multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
let Predicates = [HasAVX] in
defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
load, i128mem, SchedWriteVecALU.XMM, 0>,
- VEX_4V, VEX_WIG;
+ VEX_4V, WIG;
let Predicates = [HasAVX2] in
defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
load, i256mem, SchedWriteVecALU.YMM, 0>,
- VEX_4V, VEX_L, VEX_WIG;
+ VEX_4V, VEX_L, WIG;
let Constraints = "$src1 = $dst" in
defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
@@ -6593,7 +6593,7 @@ multiclass pcmpistrm_SS42AI<string asm> {
let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
- defm VPCMPISTRM : pcmpistrm_SS42AI<"vpcmpistrm">, VEX, VEX_WIG;
+ defm VPCMPISTRM : pcmpistrm_SS42AI<"vpcmpistrm">, VEX, WIG;
defm PCMPISTRM : pcmpistrm_SS42AI<"pcmpistrm"> ;
}
@@ -6611,7 +6611,7 @@ multiclass SS42AI_pcmpestrm<string asm> {
let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
- defm VPCMPESTRM : SS42AI_pcmpestrm<"vpcmpestrm">, VEX, VEX_WIG;
+ defm VPCMPESTRM : SS42AI_pcmpestrm<"vpcmpestrm">, VEX, WIG;
defm PCMPESTRM : SS42AI_pcmpestrm<"pcmpestrm">;
}
@@ -6629,7 +6629,7 @@ multiclass SS42AI_pcmpistri<string asm> {
let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
- defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX, VEX_WIG;
+ defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX, WIG;
defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
}
@@ -6647,7 +6647,7 @@ multiclass SS42AI_pcmpestri<string asm> {
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
let Predicates = [HasAVX] in
- defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX, VEX_WIG;
+ defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX, WIG;
defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
}
@@ -6796,28 +6796,28 @@ multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, NoVLX_Or_NoVAES, HasAES] in {
defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
- int_x86_aesni_aesenc, load>, VEX_4V, VEX_WIG;
+ int_x86_aesni_aesenc, load>, VEX_4V, WIG;
defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
- int_x86_aesni_aesenclast, load>, VEX_4V, VEX_WIG;
+ int_x86_aesni_aesenclast, load>, VEX_4V, WIG;
defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
- int_x86_aesni_aesdec, load>, VEX_4V, VEX_WIG;
+ int_x86_aesni_aesdec, load>, VEX_4V, WIG;
defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
- int_x86_aesni_aesdeclast, load>, VEX_4V, VEX_WIG;
+ int_x86_aesni_aesdeclast, load>, VEX_4V, WIG;
}
let Predicates = [NoVLX, HasVAES] in {
defm VAESENCY : AESI_binop_rm_int<0xDC, "vaesenc",
int_x86_aesni_aesenc_256, load, 0, VR256,
- i256mem>, VEX_4V, VEX_L, VEX_WIG;
+ i256mem>, VEX_4V, VEX_L, WIG;
defm VAESENCLASTY : AESI_binop_rm_int<0xDD, "vaesenclast",
int_x86_aesni_aesenclast_256, load, 0, VR256,
- i256mem>, VEX_4V, VEX_L, VEX_WIG;
+ i256mem>, VEX_4V, VEX_L, WIG;
defm VAESDECY : AESI_binop_rm_int<0xDE, "vaesdec",
int_x86_aesni_aesdec_256, load, 0, VR256,
- i256mem>, VEX_4V, VEX_L, VEX_WIG;
+ i256mem>, VEX_4V, VEX_L, WIG;
defm VAESDECLASTY : AESI_binop_rm_int<0xDF, "vaesdeclast",
int_x86_aesni_aesdeclast_256, load, 0, VR256,
- i256mem>, VEX_4V, VEX_L, VEX_WIG;
+ i256mem>, VEX_4V, VEX_L, WIG;
}
let Constraints = "$src1 = $dst" in {
@@ -6838,12 +6838,12 @@ let Predicates = [HasAVX, HasAES] in {
"vaesimc\t{$src1, $dst|$dst, $src1}",
[(set VR128:$dst,
(int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
- VEX, VEX_WIG;
+ VEX, WIG;
def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1),
"vaesimc\t{$src1, $dst|$dst, $src1}",
[(set VR128:$dst, (int_x86_aesni_aesimc (load addr:$src1)))]>,
- Sched<[WriteAESIMC.Folded]>, VEX, VEX_WIG;
+ Sched<[WriteAESIMC.Folded]>, VEX, WIG;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1),
@@ -6863,13 +6863,13 @@ let Predicates = [HasAVX, HasAES] in {
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist VR128:$src1, timm:$src2))]>,
- Sched<[WriteAESKeyGen]>, VEX, VEX_WIG;
+ Sched<[WriteAESKeyGen]>, VEX, WIG;
def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1, u8imm:$src2),
"vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
(int_x86_aesni_aeskeygenassist (load addr:$src1), timm:$src2))]>,
- Sched<[WriteAESKeyGen.Folded]>, VEX, VEX_WIG;
+ Sched<[WriteAESKeyGen.Folded]>, VEX, WIG;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, u8imm:$src2),
@@ -6958,11 +6958,11 @@ multiclass vpclmulqdq<RegisterClass RC, X86MemOperand MemOp,
let Predicates = [HasAVX, NoVLX_Or_NoVPCLMULQDQ, HasPCLMUL] in
defm VPCLMULQDQ : vpclmulqdq<VR128, i128mem, load,
- int_x86_pclmulqdq>, VEX_4V, VEX_WIG;
+ int_x86_pclmulqdq>, VEX_4V, WIG;
let Predicates = [NoVLX, HasVPCLMULQDQ] in
defm VPCLMULQDQY : vpclmulqdq<VR256, i256mem, load,
- int_x86_pclmulqdq_256>, VEX_4V, VEX_L, VEX_WIG;
+ int_x86_pclmulqdq_256>, VEX_4V, VEX_L, WIG;
multiclass vpclmulqdq_aliases_impl<string InstStr, RegisterClass RC,
X86MemOperand MemOp, string Hi, string Lo> {
@@ -7444,12 +7444,12 @@ let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
[(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L,
- Requires<[HasAVX]>, VEX_WIG;
+ Requires<[HasAVX]>, WIG;
// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
[(int_x86_avx_vzeroupper)]>, PS, VEX,
- Requires<[HasAVX]>, VEX_WIG;
+ Requires<[HasAVX]>, WIG;
} // Defs
} // SchedRW
diff --git a/llvm/utils/TableGen/X86DisassemblerTables.cpp b/llvm/utils/TableGen/X86DisassemblerTables.cpp
index 601591d9f53da..708c92aecfc85 100644
--- a/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -76,7 +76,7 @@ static inline const char* stringForOperandEncoding(OperandEncoding encoding) {
/// @return - True if child is a subset of parent, false otherwise.
static inline bool inheritsFrom(InstructionContext child,
InstructionContext parent, bool noPrefix = true,
- bool VEX_LIG = false, bool VEX_WIG = false,
+ bool VEX_LIG = false, bool WIG = false,
bool AdSize64 = false) {
if (child == parent)
return true;
@@ -144,20 +144,20 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_64BIT_REXW_ADSIZE:
return false;
case IC_VEX:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W)) ||
- (VEX_WIG && inheritsFrom(child, IC_VEX_W)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_VEX_L_W)) ||
+ (WIG && inheritsFrom(child, IC_VEX_W)) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L));
case IC_VEX_XS:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XS)) ||
- (VEX_WIG && inheritsFrom(child, IC_VEX_W_XS)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_VEX_L_W_XS)) ||
+ (WIG && inheritsFrom(child, IC_VEX_W_XS)) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_XS));
case IC_VEX_XD:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XD)) ||
- (VEX_WIG && inheritsFrom(child, IC_VEX_W_XD)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_VEX_L_W_XD)) ||
+ (WIG && inheritsFrom(child, IC_VEX_W_XD)) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_XD));
case IC_VEX_OPSIZE:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE)) ||
- (VEX_WIG && inheritsFrom(child, IC_VEX_W_OPSIZE)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE)) ||
+ (WIG && inheritsFrom(child, IC_VEX_W_OPSIZE)) ||
(VEX_LIG && inheritsFrom(child, IC_VEX_L_OPSIZE));
case IC_VEX_W:
return VEX_LIG && inheritsFrom(child, IC_VEX_L_W);
@@ -168,88 +168,88 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_VEX_W_OPSIZE:
return VEX_LIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE);
case IC_VEX_L:
- return VEX_WIG && inheritsFrom(child, IC_VEX_L_W);
+ return WIG && inheritsFrom(child, IC_VEX_L_W);
case IC_VEX_L_XS:
- return VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XS);
+ return WIG && inheritsFrom(child, IC_VEX_L_W_XS);
case IC_VEX_L_XD:
- return VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XD);
+ return WIG && inheritsFrom(child, IC_VEX_L_W_XD);
case IC_VEX_L_OPSIZE:
- return VEX_WIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE);
+ return WIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE);
case IC_VEX_L_W:
case IC_VEX_L_W_XS:
case IC_VEX_L_W_XD:
case IC_VEX_L_W_OPSIZE:
return false;
case IC_EVEX:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2));
case IC_EVEX_XS:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XS)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XS)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XS)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS));
case IC_EVEX_XD:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XD)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XD)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XD)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD));
case IC_EVEX_OPSIZE:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE));
case IC_EVEX_K:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_K)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_K)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_K)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_K));
case IC_EVEX_XS_K:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_K)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XS_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_K));
case IC_EVEX_XD_K:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_K)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XD_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_K));
case IC_EVEX_OPSIZE_K:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_K)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_K)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_K));
case IC_EVEX_KZ:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_KZ)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_KZ)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_KZ));
case IC_EVEX_XS_KZ:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_KZ)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XS_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_KZ));
case IC_EVEX_XD_KZ:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_KZ)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XD_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_KZ));
case IC_EVEX_OPSIZE_KZ:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_KZ)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_KZ));
case IC_EVEX_W:
@@ -289,29 +289,29 @@ static inline bool inheritsFrom(InstructionContext child,
return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ));
case IC_EVEX_L:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W);
case IC_EVEX_L_XS:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XS);
case IC_EVEX_L_XD:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XD);
case IC_EVEX_L_OPSIZE:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE);
case IC_EVEX_L_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_K);
case IC_EVEX_L_XS_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K);
case IC_EVEX_L_XD_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K);
case IC_EVEX_L_OPSIZE_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K);
case IC_EVEX_L_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_KZ);
case IC_EVEX_L_XS_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ);
case IC_EVEX_L_XD_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ);
case IC_EVEX_L_OPSIZE_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ);
case IC_EVEX_L_W:
case IC_EVEX_L_W_XS:
case IC_EVEX_L_W_XD:
@@ -328,29 +328,29 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_EVEX_L_W_OPSIZE_KZ:
return false;
case IC_EVEX_L2:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W);
case IC_EVEX_L2_XS:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XS);
case IC_EVEX_L2_XD:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XD);
case IC_EVEX_L2_OPSIZE:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE);
case IC_EVEX_L2_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_K);
case IC_EVEX_L2_XS_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K);
case IC_EVEX_L2_XD_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K);
case IC_EVEX_L2_OPSIZE_K:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K);
case IC_EVEX_L2_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ);
case IC_EVEX_L2_XS_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ);
case IC_EVEX_L2_XD_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ);
case IC_EVEX_L2_OPSIZE_KZ:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ);
case IC_EVEX_L2_W:
case IC_EVEX_L2_W_XS:
case IC_EVEX_L2_W_XD:
@@ -367,79 +367,79 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_EVEX_L2_W_OPSIZE_KZ:
return false;
case IC_EVEX_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_B));
case IC_EVEX_XS_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XS_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XS_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_B));
case IC_EVEX_XD_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XD_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XD_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_B));
case IC_EVEX_OPSIZE_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_B));
case IC_EVEX_K_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_K_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_K_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_K_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_K_B));
case IC_EVEX_XS_K_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_K_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XS_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_K_B));
case IC_EVEX_XD_K_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_K_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XD_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_K_B));
case IC_EVEX_OPSIZE_K_B:
- return (VEX_LIG && VEX_WIG &&
+ return (VEX_LIG && WIG &&
inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K_B)) ||
- (VEX_LIG && VEX_WIG &&
+ (VEX_LIG && WIG &&
inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_K_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_K_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_K_B));
case IC_EVEX_KZ_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_KZ_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_KZ_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_KZ_B));
case IC_EVEX_XS_KZ_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_KZ_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XS_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_KZ_B));
case IC_EVEX_XD_KZ_B:
- return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ_B)) ||
- (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_KZ_B)) ||
+ return (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ_B)) ||
+ (VEX_LIG && WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_XD_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_KZ_B));
case IC_EVEX_OPSIZE_KZ_B:
- return (VEX_LIG && VEX_WIG &&
+ return (VEX_LIG && WIG &&
inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ_B)) ||
- (VEX_LIG && VEX_WIG &&
+ (VEX_LIG && WIG &&
inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ_B)) ||
- (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_KZ_B)) ||
+ (WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_KZ_B));
case IC_EVEX_W_B:
@@ -479,29 +479,29 @@ static inline bool inheritsFrom(InstructionContext child,
return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ_B)) ||
(VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ_B));
case IC_EVEX_L_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_B);
case IC_EVEX_L_XS_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XS_B);
case IC_EVEX_L_XD_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XD_B);
case IC_EVEX_L_OPSIZE_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_B);
case IC_EVEX_L_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_K_B);
case IC_EVEX_L_XS_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K_B);
case IC_EVEX_L_XD_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K_B);
case IC_EVEX_L_OPSIZE_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K_B);
case IC_EVEX_L_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_KZ_B);
case IC_EVEX_L_XS_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ_B);
case IC_EVEX_L_XD_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ_B);
case IC_EVEX_L_OPSIZE_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ_B);
case IC_EVEX_L_W_B:
case IC_EVEX_L_W_XS_B:
case IC_EVEX_L_W_XD_B:
@@ -518,29 +518,29 @@ static inline bool inheritsFrom(InstructionContext child,
case IC_EVEX_L_W_OPSIZE_KZ_B:
return false;
case IC_EVEX_L2_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_B);
case IC_EVEX_L2_XS_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_B);
case IC_EVEX_L2_XD_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_B);
case IC_EVEX_L2_OPSIZE_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_B);
case IC_EVEX_L2_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_K_B);
case IC_EVEX_L2_XS_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K_B);
case IC_EVEX_L2_XD_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K_B);
case IC_EVEX_L2_OPSIZE_K_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K_B);
case IC_EVEX_L2_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ_B);
case IC_EVEX_L2_XS_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ_B);
case IC_EVEX_L2_XD_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ_B);
case IC_EVEX_L2_OPSIZE_KZ_B:
- return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ_B);
+ return WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ_B);
case IC_EVEX_L2_W_B:
case IC_EVEX_L2_W_XS_B:
case IC_EVEX_L2_W_XD_B:
@@ -1068,7 +1068,7 @@ void DisassemblerTables::setTableFields(OpcodeType type,
bool is32bit,
bool noPrefix,
bool ignoresVEX_L,
- bool ignoresVEX_W,
+ bool ignoresW,
unsigned addressSize) {
ContextDecision &decision = *Tables[type];
@@ -1080,7 +1080,7 @@ void DisassemblerTables::setTableFields(OpcodeType type,
bool adSize64 = addressSize == 64;
if (inheritsFrom((InstructionContext)index,
InstructionSpecifiers[uid].insnContext, noPrefix,
- ignoresVEX_L, ignoresVEX_W, adSize64))
+ ignoresVEX_L, ignoresW, adSize64))
setTableFields(decision.opcodeDecisions[index].modRMDecisions[opcode],
filter,
uid,
diff --git a/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp b/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
index 99a2d348b6ced..35792ab67a4f5 100644
--- a/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
@@ -116,8 +116,8 @@ class IsMatch {
RecognizableInstrBase EVEXRI(*EVEXInst);
bool VEX_W = VEXRI.HasREX_W;
bool EVEX_W = EVEXRI.HasREX_W;
- bool VEX_WIG = VEXRI.IgnoresVEX_W;
- bool EVEX_WIG = EVEXRI.IgnoresVEX_W;
+ bool VEX_WIG = VEXRI.IgnoresW;
+ bool EVEX_WIG = EVEXRI.IgnoresW;
bool EVEX_W1_VEX_W0 = EVEXInst->TheDef->getValueAsBit("EVEX_W1_VEX_W0");
if (VEXRI.IsCodeGenOnly != EVEXRI.IsCodeGenOnly ||
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 688f9bd38ee23..475a577c9f301 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -305,7 +305,7 @@ class IsMatch {
if (std::make_tuple(RegRI.Encoding, RegRI.Opcode, RegRI.OpPrefix,
RegRI.OpMap, RegRI.OpSize, RegRI.AdSize, RegRI.HasREX_W,
RegRI.HasVEX_4V, RegRI.HasVEX_L, RegRI.IgnoresVEX_L,
- RegRI.IgnoresVEX_W, RegRI.HasEVEX_K, RegRI.HasEVEX_KZ,
+ RegRI.IgnoresW, RegRI.HasEVEX_K, RegRI.HasEVEX_KZ,
RegRI.HasEVEX_L2, RegRec->getValueAsBit("hasEVEX_RC"),
RegRec->getValueAsBit("hasLockPrefix"),
RegRec->getValueAsBit("hasNoTrackPrefix"),
@@ -313,7 +313,7 @@ class IsMatch {
std::make_tuple(MemRI.Encoding, MemRI.Opcode, MemRI.OpPrefix,
MemRI.OpMap, MemRI.OpSize, MemRI.AdSize, MemRI.HasREX_W,
MemRI.HasVEX_4V, MemRI.HasVEX_L, MemRI.IgnoresVEX_L,
- MemRI.IgnoresVEX_W, MemRI.HasEVEX_K, MemRI.HasEVEX_KZ,
+ MemRI.IgnoresW, MemRI.HasEVEX_K, MemRI.HasEVEX_KZ,
MemRI.HasEVEX_L2, MemRec->getValueAsBit("hasEVEX_RC"),
MemRec->getValueAsBit("hasLockPrefix"),
MemRec->getValueAsBit("hasNoTrackPrefix"),
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp
index 642165d366f2a..7cbee156ffe0c 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -118,7 +118,7 @@ RecognizableInstrBase::RecognizableInstrBase(const CodeGenInstruction &insn) {
AdSize = byteFromRec(Rec, "AdSizeBits");
HasREX_W = Rec->getValueAsBit("hasREX_W");
HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
- IgnoresVEX_W = Rec->getValueAsBit("IgnoresVEX_W");
+ IgnoresW = Rec->getValueAsBit("IgnoresW");
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
HasEVEX_L2 = Rec->getValueAsBit("hasEVEX_L2");
HasEVEX_K = Rec->getValueAsBit("hasEVEX_K");
@@ -882,11 +882,11 @@ void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
tables.setTableFields(*opcodeType, insnContext(), currentOpcode, *filter,
UID, Is32Bit, OpPrefix == 0,
IgnoresVEX_L || EncodeRC,
- IgnoresVEX_W, AddressSize);
+ IgnoresW, AddressSize);
} else {
tables.setTableFields(*opcodeType, insnContext(), opcodeToSet, *filter, UID,
Is32Bit, OpPrefix == 0, IgnoresVEX_L || EncodeRC,
- IgnoresVEX_W, AddressSize);
+ IgnoresW, AddressSize);
}
#undef MAP
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.h b/llvm/utils/TableGen/X86RecognizableInstr.h
index 4325e29ef4ff0..5efacdb27465b 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -182,8 +182,8 @@ struct RecognizableInstrBase {
bool HasREX_W;
/// The hasVEX_4V field from the record
bool HasVEX_4V;
- /// The IgnoresVEX_W field from the record
- bool IgnoresVEX_W;
+ /// The IgnoresW field from the record
+ bool IgnoresW;
/// The hasVEX_L field from the record
bool HasVEX_L;
/// The ignoreVEX_L field from the record