[llvm] r329940 - [X86] Remove explicit SSE/AVX schedule itineraries from defs (PR37093)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 12 12:25:07 PDT 2018

Author: rksimon
Date: Thu Apr 12 12:25:07 2018
New Revision: 329940
URL: http://llvm.org/viewvc/llvm-project?rev=329940&view=rev
Log:
[X86] Remove explicit SSE/AVX schedule itineraries from defs (PR37093)
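
In short: the InstrItinClass parameters on the AVX512/SSE multiclasses now
default to NoItinerary, and the per-def IIC_SSE_* itinerary arguments are
either dropped or replaced with NoItinerary where the parameter is still
passed positionally, leaving the Sched<[...]>/SchedRW classes as the
scheduling information. A representative before/after, distilled from the
MOVAPSmr hunk below (an illustrative sketch, not an additional hunk in this
patch):

  // Before: an explicit itinerary was passed alongside the pattern list.
  def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>;

  // After: the itinerary argument is omitted (the parameter defaults to
  // NoItinerary) and the enclosing SchedRW = [WriteFStore] block supplies
  // the scheduling class.
  def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;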
Modified:
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/lib/Target/X86/X86InstrSSE.td
    llvm/trunk/lib/Target/X86/X86Schedule.td

Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=329940&r1=329939&r2=329940&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Thu Apr 12 12:25:07 2018
@@ -212,7 +212,7 @@ multiclass AVX512_maskable_custom<bits<8
list<dag> Pattern,
list<dag> MaskingPattern,
list<dag> ZeroMaskingPattern,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
string MaskingConstraint = "",
bit IsCommutable = 0,
bit IsKCommutable = 0> {
@@ -252,7 +252,7 @@ multiclass AVX512_maskable_common<bits<8
string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
dag RHS, dag MaskingRHS,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
SDNode Select = vselect,
string MaskingConstraint = "",
bit IsCommutable = 0,
@@ -274,7 +274,7 @@ multiclass AVX512_maskable_split<bits<8>
dag Outs, dag Ins, string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
dag RHS, dag MaskRHS,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
bit IsCommutable = 0, bit IsKCommutable = 0,
SDNode Select = vselect> :
AVX512_maskable_custom<O, F, Outs, Ins,
@@ -295,7 +295,7 @@ multiclass AVX512_maskable<bits<8> O, Fo
dag Outs, dag Ins, string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
dag RHS,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
bit IsCommutable = 0, bit IsKCommutable = 0,
SDNode Select = vselect> :
AVX512_maskable_common<O, F, _, Outs, Ins,
@@ -311,7 +311,7 @@ multiclass AVX512_maskable_scalar<bits<8
dag Outs, dag Ins, string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
dag RHS,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
bit IsCommutable = 0> :
AVX512_maskable<O, F, _, Outs, Ins, OpcodeStr, AttSrcAsm, IntelSrcAsm,
RHS, itin, IsCommutable, 0, X86selects>;
@@ -323,7 +323,7 @@ multiclass AVX512_maskable_scalar<bits<8
multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
dag Outs, dag NonTiedIns, string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
- dag RHS, InstrItinClass itin,
+ dag RHS, InstrItinClass itin = NoItinerary,
bit IsCommutable = 0,
bit IsKCommutable = 0,
SDNode Select = vselect,
@@ -340,7 +340,7 @@ multiclass AVX512_maskable_3src<bits<8>
multiclass AVX512_maskable_3src_scalar<bits<8> O, Format F, X86VectorVTInfo _,
dag Outs, dag NonTiedIns, string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
- dag RHS, InstrItinClass itin,
+ dag RHS, InstrItinClass itin = NoItinerary,
bit IsCommutable = 0,
bit IsKCommutable = 0,
bit MaskOnly = 0> :
@@ -370,7 +370,7 @@ multiclass AVX512_maskable_custom_cmp<bi
string AttSrcAsm, string IntelSrcAsm,
list<dag> Pattern,
list<dag> MaskingPattern,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
bit IsCommutable = 0> {
let isCommutable = IsCommutable in
def NAME: AVX512<O, F, Outs, Ins,
@@ -390,7 +390,7 @@ multiclass AVX512_maskable_common_cmp<bi
string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
dag RHS, dag MaskingRHS,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
bit IsCommutable = 0> :
AVX512_maskable_custom_cmp<O, F, Outs, Ins, MaskingIns, OpcodeStr,
AttSrcAsm, IntelSrcAsm,
@@ -400,7 +400,7 @@ multiclass AVX512_maskable_common_cmp<bi
multiclass AVX512_maskable_cmp<bits<8> O, Format F, X86VectorVTInfo _,
dag Outs, dag Ins, string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
- dag RHS, InstrItinClass itin,
+ dag RHS, InstrItinClass itin = NoItinerary,
bit IsCommutable = 0> :
AVX512_maskable_common_cmp<O, F, _, Outs, Ins,
!con((ins _.KRCWM:$mask), Ins),
@@ -422,7 +422,7 @@ multiclass AVX512_maskable_logic<bits<8>
dag Outs, dag Ins, string OpcodeStr,
string AttSrcAsm, string IntelSrcAsm,
dag RHS, dag MaskedRHS,
- InstrItinClass itin,
+ InstrItinClass itin = NoItinerary,
bit IsCommutable = 0, SDNode Select = vselect> :
AVX512_maskable_custom<O, F, Outs, Ins,
!con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
@@ -778,15 +778,15 @@ let ExeDomain = SSEPackedSingle in {
def VINSERTPSZrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
"vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
- [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))],
- IIC_SSE_INSERTPS_RR>, EVEX_4V, Sched<[WriteFShuffle]>;
+ [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
+ EVEX_4V, Sched<[WriteFShuffle]>;
def VINSERTPSZrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, f32mem:$src2, u8imm:$src3),
"vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
[(set VR128X:$dst, (X86insertps VR128X:$src1,
(v4f32 (scalar_to_vector (loadf32 addr:$src2))),
- imm:$src3))], IIC_SSE_INSERTPS_RM>, EVEX_4V,
- EVEX_CD8<32, CD8VT1>, Sched<[WriteFShuffleLd, ReadAfterLd]>;
+ imm:$src3))]>,
+ EVEX_4V, EVEX_CD8<32, CD8VT1>, Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
//===----------------------------------------------------------------------===//
@@ -1106,14 +1106,14 @@ defm : vextract_for_mask_cast<"VEXTRACTI
def VEXTRACTPSZrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
(ins VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))],
- IIC_SSE_EXTRACTPS_RR>, EVEX, VEX_WIG, Sched<[WriteFBlend]>;
+ [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
+ EVEX, VEX_WIG, Sched<[WriteFBlend]>;
def VEXTRACTPSZmr : AVX512AIi8<0x17, MRMDestMem, (outs),
(ins f32mem:$dst, VR128X:$src1, u8imm:$src2),
"vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
- addr:$dst)], IIC_SSE_EXTRACTPS_RM>,
+ addr:$dst)]>,
EVEX, VEX_WIG, EVEX_CD8<32, CD8VT1>, Sched<[WriteFBlendLd, WriteRMW]>;
//===---------------------------------------------------------------------===//
@@ -1703,8 +1703,8 @@ multiclass avx512_mask_broadcastm<bits<8
X86VectorVTInfo _, RegisterClass KRC> {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.RC:$dst), (ins KRC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set _.RC:$dst, (_.VT (X86VBroadcastm KRC:$src)))],
- IIC_SSE_PSHUF_RI>, EVEX, Sched<[WriteShuffle]>;
+ [(set _.RC:$dst, (_.VT (X86VBroadcastm KRC:$src)))]>,
+ EVEX, Sched<[WriteShuffle]>;
}
multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
@@ -2721,15 +2721,15 @@ multiclass avx512_mask_mov<bits<8> opc_k
ValueType vvt, X86MemOperand x86memop> {
let hasSideEffects = 0, SchedRW = [WriteMove] in
def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
- IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
+ Sched<[WriteMove]>;
def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set KRC:$dst, (vvt (load addr:$src)))], IIC_SSE_MOVDQ>,
+ [(set KRC:$dst, (vvt (load addr:$src)))]>,
Sched<[WriteLoad]>;
def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store KRC:$src, addr:$dst)], IIC_SSE_MOVDQ>,
+ [(store KRC:$src, addr:$dst)]>,
Sched<[WriteStore]>;
}
@@ -2738,11 +2738,11 @@ multiclass avx512_mask_mov_gpr<bits<8> o
RegisterClass KRC, RegisterClass GRC> {
let hasSideEffects = 0 in {
def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
- IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
+ Sched<[WriteMove]>;
def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
- IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>,
+ Sched<[WriteMove]>;
}
}
@@ -3446,24 +3446,24 @@ defm VMOVDQU64 : avx512_load_vl<0x6F, "v
let isReMaterializable = 1, canFoldAsLoad = 1,
isPseudo = 1, SchedRW = [WriteLoad], mayLoad = 1, hasSideEffects = 0 in {
def VMOVAPSZ128rm_NOVLX : I<0, Pseudo, (outs VR128X:$dst), (ins f128mem:$src),
- "", [], IIC_SSE_MOVA_P_RM>;
+ "", []>;
def VMOVAPSZ256rm_NOVLX : I<0, Pseudo, (outs VR256X:$dst), (ins f256mem:$src),
- "", [], IIC_SSE_MOVA_P_RM>;
+ "", []>;
def VMOVUPSZ128rm_NOVLX : I<0, Pseudo, (outs VR128X:$dst), (ins f128mem:$src),
- "", [], IIC_SSE_MOVA_P_RM>;
+ "", []>;
def VMOVUPSZ256rm_NOVLX : I<0, Pseudo, (outs VR256X:$dst), (ins f256mem:$src),
- "", [], IIC_SSE_MOVA_P_RM>;
+ "", []>;
}
let isPseudo = 1, SchedRW = [WriteStore], mayStore = 1, hasSideEffects = 0 in {
def VMOVAPSZ128mr_NOVLX : I<0, Pseudo, (outs), (ins f128mem:$dst, VR128X:$src),
- "", [], IIC_SSE_MOVA_P_MR>;
+ "", []>;
def VMOVAPSZ256mr_NOVLX : I<0, Pseudo, (outs), (ins f256mem:$dst, VR256X:$src),
- "", [], IIC_SSE_MOVA_P_MR>;
+ "", []>;
def VMOVUPSZ128mr_NOVLX : I<0, Pseudo, (outs), (ins f128mem:$dst, VR128X:$src),
- "", [], IIC_SSE_MOVA_P_MR>;
+ "", []>;
def VMOVUPSZ256mr_NOVLX : I<0, Pseudo, (outs), (ins f256mem:$dst, VR256X:$src),
- "", [], IIC_SSE_MOVA_P_MR>;
+ "", []>;
}
def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
@@ -3651,40 +3651,40 @@ let ExeDomain = SSEPackedInt in {
def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
- (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
+ (v4i32 (scalar_to_vector GR32:$src)))]>,
EVEX, Sched<[WriteMove]>;
def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
- IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteLoad]>;
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
+ EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteLoad]>;
def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
- (v2i64 (scalar_to_vector GR64:$src)))],
- IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
+ (v2i64 (scalar_to_vector GR64:$src)))]>,
+ EVEX, VEX_W, Sched<[WriteMove]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def VMOV64toPQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
(ins i64mem:$src),
- "vmovq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>,
+ "vmovq\t{$src, $dst|$dst, $src}", []>,
EVEX, VEX_W, EVEX_CD8<64, CD8VT1>, Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in {
def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64X:$dst), (ins GR64:$src),
"vmovq\t{$src, $dst|$dst, $src}",
- [(set FR64X:$dst, (bitconvert GR64:$src))],
- IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
+ [(set FR64X:$dst, (bitconvert GR64:$src))]>,
+ EVEX, VEX_W, Sched<[WriteMove]>;
def VMOV64toSDZrm : AVX512XSI<0x7E, MRMSrcMem, (outs FR64X:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set FR64X:$dst, (bitconvert (loadi64 addr:$src)))]>,
EVEX, VEX_W, EVEX_CD8<8, CD8VT8>, Sched<[WriteLoad]>;
def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bitconvert FR64X:$src))],
- IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
+ [(set GR64:$dst, (bitconvert FR64X:$src))]>,
+ EVEX, VEX_W, Sched<[WriteMove]>;
def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
- [(store (i64 (bitconvert FR64X:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
+ [(store (i64 (bitconvert FR64X:$src)), addr:$dst)]>,
+ EVEX, VEX_W, Sched<[WriteStore]>,
EVEX_CD8<64, CD8VT1>;
}
} // ExeDomain = SSEPackedInt
@@ -3694,13 +3694,13 @@ def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDe
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
"vmovd\t{$src, $dst|$dst, $src}",
- [(set FR32X:$dst, (bitconvert GR32:$src))],
- IIC_SSE_MOVDQ>, EVEX, Sched<[WriteMove]>;
+ [(set FR32X:$dst, (bitconvert GR32:$src))]>,
+ EVEX, Sched<[WriteMove]>;
def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
"vmovd\t{$src, $dst|$dst, $src}",
- [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
- IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteLoad]>;
+ [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))]>,
+ EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteLoad]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
// Move doubleword from xmm register to r/m32
@@ -3709,13 +3709,13 @@ let ExeDomain = SSEPackedInt in {
def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (extractelt (v4i32 VR128X:$src),
- (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
+ (iPTR 0)))]>,
EVEX, Sched<[WriteMove]>;
def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, VR128X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(store (i32 (extractelt (v4i32 VR128X:$src),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
+ (iPTR 0))), addr:$dst)]>,
EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt
@@ -3725,28 +3725,28 @@ let ExeDomain = SSEPackedInt in {
def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
- (iPTR 0)))],
- IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W, Sched<[WriteMove]>,
+ (iPTR 0)))]>,
+ PD, EVEX, VEX_W, Sched<[WriteMove]>,
Requires<[HasAVX512, In64BitMode]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def VMOVPQIto64Zmr : I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128X:$src),
- "vmovq\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W, Sched<[WriteStore]>,
+ "vmovq\t{$src, $dst|$dst, $src}", []>, PD,
+ EVEX, VEX_W, Sched<[WriteStore]>,
Requires<[HasAVX512, In64BitMode]>;
def VMOVPQI2QIZmr : I<0xD6, MRMDestMem, (outs),
(ins i64mem:$dst, VR128X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
- addr:$dst)], IIC_SSE_MOVDQ>,
+ addr:$dst)]>,
EVEX, PD, VEX_W, EVEX_CD8<64, CD8VT1>,
Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
let hasSideEffects = 0 in
def VMOVPQI2QIZrr : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src),
- "vmovq.s\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>,
+ "vmovq.s\t{$src, $dst|$dst, $src}", []>,
EVEX, VEX_W, Sched<[WriteVecLogic]>;
} // ExeDomain = SSEPackedInt
@@ -3756,13 +3756,13 @@ let ExeDomain = SSEPackedInt, isCodeGenO
def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
(ins FR32X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bitconvert FR32X:$src))],
- IIC_SSE_MOVD_ToGP>, EVEX, Sched<[WriteMove]>;
+ [(set GR32:$dst, (bitconvert FR32X:$src))]>,
+ EVEX, Sched<[WriteMove]>;
def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, FR32X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
- [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteStore]>;
+ [(store (i32 (bitconvert FR32X:$src)), addr:$dst)]>,
+ EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
// Move Quadword Int to Packed Quadword Int
@@ -3792,7 +3792,7 @@ multiclass avx512_move_scalar<string asm
(ins _.RC:$src1, _.RC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, _.RC:$src2)))],
- _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, Sched<[WriteFShuffle]>;
+ _.ExeDomain>, EVEX_4V, Sched<[WriteFShuffle]>;
def rrkz : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst {${mask}} {z}|",
@@ -3800,7 +3800,7 @@ multiclass avx512_move_scalar<string asm
[(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
(_.VT (OpNode _.RC:$src1, _.RC:$src2)),
_.ImmAllZerosV)))],
- _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_KZ, Sched<[WriteFShuffle]>;
+ _.ExeDomain>, EVEX_4V, EVEX_KZ, Sched<[WriteFShuffle]>;
let Constraints = "$src0 = $dst" in
def rrk : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
(ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
@@ -3809,34 +3809,34 @@ multiclass avx512_move_scalar<string asm
[(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
(_.VT (OpNode _.RC:$src1, _.RC:$src2)),
(_.VT _.RC:$src0))))],
- _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_K, Sched<[WriteFShuffle]>;
+ _.ExeDomain>, EVEX_4V, EVEX_K, Sched<[WriteFShuffle]>;
let canFoldAsLoad = 1, isReMaterializable = 1 in
def rm : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))],
- _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, Sched<[WriteLoad]>;
+ _.ExeDomain>, EVEX, Sched<[WriteLoad]>;
let mayLoad = 1, hasSideEffects = 0 in {
let Constraints = "$src0 = $dst" in
def rmk : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src0, _.KRCWM:$mask, _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst {${mask}}|",
"$dst {${mask}}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_K, Sched<[WriteLoad]>;
+ [], _.ExeDomain>, EVEX, EVEX_K, Sched<[WriteLoad]>;
def rmkz : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst {${mask}} {z}|",
"$dst {${mask}} {z}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_KZ, Sched<[WriteLoad]>;
+ [], _.ExeDomain>, EVEX, EVEX_KZ, Sched<[WriteLoad]>;
}
def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
+ [(store _.FRC:$src, addr:$dst)], _.ExeDomain>,
EVEX, Sched<[WriteStore]>;
let mayStore = 1, hasSideEffects = 0 in
def mrk: AVX512PI<0x11, MRMDestMem, (outs),
(ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
!strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K, Sched<[WriteStore]>;
+ [], _.ExeDomain>, EVEX, EVEX_K, Sched<[WriteStore]>;
}
defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,
@@ -4002,7 +4002,7 @@ let hasSideEffects = 0 in {
def VMOVSSZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovss.s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [], IIC_SSE_MOV_S_RR>, XS, EVEX_4V, VEX_LIG,
+ []>, XS, EVEX_4V, VEX_LIG,
FoldGenData<"VMOVSSZrr">, Sched<[WriteFShuffle]>;
let Constraints = "$src0 = $dst" in
@@ -4011,20 +4011,20 @@ let Constraints = "$src0 = $dst" in
VR128X:$src1, VR128X:$src2),
"vmovss.s\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
- [], IIC_SSE_MOV_S_RR>, EVEX_K, XS, EVEX_4V, VEX_LIG,
+ []>, EVEX_K, XS, EVEX_4V, VEX_LIG,
FoldGenData<"VMOVSSZrrk">, Sched<[WriteFShuffle]>;
def VMOVSSZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins f32x_info.KRCWM:$mask, VR128X:$src1, VR128X:$src2),
"vmovss.s\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
- [], IIC_SSE_MOV_S_RR>, EVEX_KZ, XS, EVEX_4V, VEX_LIG,
+ []>, EVEX_KZ, XS, EVEX_4V, VEX_LIG,
FoldGenData<"VMOVSSZrrkz">, Sched<[WriteFShuffle]>;
def VMOVSDZrr_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovsd.s\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [], IIC_SSE_MOV_S_RR>, XD, EVEX_4V, VEX_LIG, VEX_W,
+ []>, XD, EVEX_4V, VEX_LIG, VEX_W,
FoldGenData<"VMOVSDZrr">, Sched<[WriteFShuffle]>;
let Constraints = "$src0 = $dst" in
@@ -4033,7 +4033,7 @@ let Constraints = "$src0 = $dst" in
VR128X:$src1, VR128X:$src2),
"vmovsd.s\t{$src2, $src1, $dst {${mask}}|"#
"$dst {${mask}}, $src1, $src2}",
- [], IIC_SSE_MOV_S_RR>, EVEX_K, XD, EVEX_4V, VEX_LIG,
+ []>, EVEX_K, XD, EVEX_4V, VEX_LIG,
VEX_W, FoldGenData<"VMOVSDZrrk">, Sched<[WriteFShuffle]>;
def VMOVSDZrrkz_REV: AVX512<0x11, MRMDestReg, (outs VR128X:$dst),
@@ -4041,7 +4041,7 @@ let Constraints = "$src0 = $dst" in
VR128X:$src2),
"vmovsd.s\t{$src2, $src1, $dst {${mask}} {z}|"#
"$dst {${mask}} {z}, $src1, $src2}",
- [], IIC_SSE_MOV_S_RR>, EVEX_KZ, XD, EVEX_4V, VEX_LIG,
+ []>, EVEX_KZ, XD, EVEX_4V, VEX_LIG,
VEX_W, FoldGenData<"VMOVSDZrrkz">, Sched<[WriteFShuffle]>;
}
@@ -4184,8 +4184,8 @@ def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E,
(ins VR128X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst, (v2i64 (X86vzmovl
- (v2i64 VR128X:$src))))],
- IIC_SSE_MOVQ_RR>, EVEX, VEX_W;
+ (v2i64 VR128X:$src))))]>,
+ EVEX, VEX_W;
}
let Predicates = [HasAVX512] in {
@@ -6057,13 +6057,13 @@ defm VPSHUFB: avx512_pshufb_sizes<0x00,
def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>, EVEX_4V;
+ [(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))]>,
+ Sched<[WriteFShuffle]>, EVEX_4V;
def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
"vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>, EVEX_4V;
+ [(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))]>,
+ Sched<[WriteFShuffle]>, EVEX_4V;
//===----------------------------------------------------------------------===//
// VMOVHPS/PD VMOVLPS Instructions
@@ -6079,8 +6079,8 @@ multiclass avx512_mov_hilo_packed<bits<8
[(set _.RC:$dst,
(OpNode _.RC:$src1,
(_.VT (bitconvert
- (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffleLd, ReadAfterLd]>, EVEX_4V;
+ (v2f64 (scalar_to_vector (loadf64 addr:$src2)))))))]>,
+ Sched<[WriteFShuffleLd, ReadAfterLd]>, EVEX_4V;
}
defm VMOVHPSZ128 : avx512_mov_hilo_packed<0x16, "vmovhps", X86Movlhps,
@@ -6122,28 +6122,26 @@ def VMOVHPSZ128mr : AVX512PSI<0x17, MRMD
[(store (f64 (extractelt
(X86Unpckh (bc_v2f64 (v4f32 VR128X:$src)),
(bc_v2f64 (v4f32 VR128X:$src))),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
+ (iPTR 0))), addr:$dst)]>,
EVEX, EVEX_CD8<32, CD8VT2>;
def VMOVHPDZ128mr : AVX512PDI<0x17, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovhpd\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt
(v2f64 (X86Unpckh VR128X:$src, VR128X:$src)),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>,
+ (iPTR 0))), addr:$dst)]>,
EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
def VMOVLPSZ128mr : AVX512PSI<0x13, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovlps\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt (bc_v2f64 (v4f32 VR128X:$src)),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOV_LH>,
+ (iPTR 0))), addr:$dst)]>,
EVEX, EVEX_CD8<32, CD8VT2>;
def VMOVLPDZ128mr : AVX512PDI<0x13, MRMDestMem, (outs),
(ins f64mem:$dst, VR128X:$src),
"vmovlpd\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt (v2f64 VR128X:$src),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOV_LH>,
+ (iPTR 0))), addr:$dst)]>,
EVEX, EVEX_CD8<64, CD8VT1>, VEX_W;
} // SchedRW
@@ -8887,8 +8885,8 @@ defm VSCATTERPF1QPD: avx512_gather_scatt
multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
!strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
- [(set Vec.RC:$dst, (Vec.VT (sext Vec.KRC:$src)))],
- IIC_SSE_MOV_S_RR>, EVEX, Sched<[WriteMove]>;
+ [(set Vec.RC:$dst, (Vec.VT (sext Vec.KRC:$src)))]>,
+ EVEX, Sched<[WriteMove]>;
}
multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
@@ -8910,8 +8908,8 @@ defm VPMOVM2Q : cvt_mask_by_elt_width<0x
multiclass convert_vector_to_mask_common<bits<8> opc, X86VectorVTInfo _, string OpcodeStr > {
def rr : AVX512XS8I<opc, MRMSrcReg, (outs _.KRC:$dst), (ins _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set _.KRC:$dst, (X86pcmpgtm _.ImmAllZerosV, (_.VT _.RC:$src)))],
- IIC_SSE_MOV_S_RR>, EVEX, Sched<[WriteMove]>;
+ [(set _.KRC:$dst, (X86pcmpgtm _.ImmAllZerosV, (_.VT _.RC:$src)))]>,
+ EVEX, Sched<[WriteMove]>;
}
// Use 512bit version to implement 128/256 bit in case NoVLX.
@@ -9961,14 +9959,14 @@ multiclass avx512_extract_elt_w<string O
(ins _.RC:$src1, u8imm:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst,
- (X86pextrw (_.VT _.RC:$src1), imm:$src2))],
- IIC_SSE_PEXTRW>, EVEX, PD, Sched<[WriteShuffle]>;
+ (X86pextrw (_.VT _.RC:$src1), imm:$src2))]>,
+ EVEX, PD, Sched<[WriteShuffle]>;
let hasSideEffects = 0 in
def rr_REV : AVX512Ii8<0x15, MRMDestReg, (outs GR32orGR64:$dst),
(ins _.RC:$src1, u8imm:$src2),
- OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
- IIC_SSE_PEXTRW>, EVEX, TAPD, FoldGenData<NAME#rr>,
+ OpcodeStr#".s\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ EVEX, TAPD, FoldGenData<NAME#rr>,
Sched<[WriteShuffle]>;
defm NAME : avx512_extract_elt_bw_m<0x15, OpcodeStr, X86pextrw, _>, TAPD;

Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=329940&r1=329939&r2=329940&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Thu Apr 12 12:25:07 2018
@@ -323,14 +323,14 @@ multiclass sse12_fp_packed_logical_rm<bi
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- pat_rr, IIC_SSE_BIT_P_RR, d>,
+ pat_rr, NoItinerary, d>,
Sched<[WriteVecLogic]>;
let hasSideEffects = 0, mayLoad = 1 in
def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- pat_rm, IIC_SSE_BIT_P_RM, d>,
+ pat_rm, NoItinerary, d>,
Sched<[WriteVecLogicLd, ReadAfterLd]>;
}
@@ -407,15 +407,14 @@ multiclass sse12_move_rr<SDNode OpNode,
(ins VR128:$src1, VR128:$src2),
!strconcat(base_opc, asm_opr),
[(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
- IIC_SSE_MOV_S_RR, d>, Sched<[WriteFShuffle]>;
+ NoItinerary, d>, Sched<[WriteFShuffle]>;
// For the disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
- !strconcat(base_opc, asm_opr),
- [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>,
- FoldGenData<Name#rr>;
+ !strconcat(base_opc, asm_opr), []>,
+ Sched<[WriteFShuffle]>, FoldGenData<Name#rr>;
}
multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
@@ -429,7 +428,7 @@ multiclass sse12_move<RegisterClass RC,
def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
+ [(store RC:$src, addr:$dst)], NoItinerary, d>,
VEX, VEX_LIG, Sched<[WriteStore]>, VEX_WIG;
// SSE1 & 2
let Constraints = "$src1 = $dst" in {
@@ -439,7 +438,7 @@ multiclass sse12_move<RegisterClass RC,
def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR, d>,
+ [(store RC:$src, addr:$dst)], NoItinerary, d>,
Sched<[WriteStore]>;
}
@@ -449,11 +448,11 @@ multiclass sse12_move_rm<RegisterClass R
def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))],
- IIC_SSE_MOV_S_RM, d>, VEX, VEX_LIG, Sched<[WriteLoad]>, VEX_WIG;
+ NoItinerary, d>, VEX, VEX_LIG, Sched<[WriteLoad]>, VEX_WIG;
def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set RC:$dst, (mem_pat addr:$src))],
- IIC_SSE_MOV_S_RM, d>, Sched<[WriteLoad]>;
+ NoItinerary, d>, Sched<[WriteLoad]>;
}
defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss",
@@ -696,36 +695,36 @@ defm MOVUPD : sse12_mov_packed<0x10, VR1
let SchedRW = [WriteFStore], Predicates = [HasAVX, NoVLX] in {
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(alignedstore (v4f32 VR128:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>, VEX, VEX_WIG;
+ [(alignedstore (v4f32 VR128:$src), addr:$dst)]>,
+ VEX, VEX_WIG;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore (v2f64 VR128:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>, VEX, VEX_WIG;
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)]>,
+ VEX, VEX_WIG;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
- [(store (v4f32 VR128:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>, VEX, VEX_WIG;
+ [(store (v4f32 VR128:$src), addr:$dst)]>,
+ VEX, VEX_WIG;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movupd\t{$src, $dst|$dst, $src}",
- [(store (v2f64 VR128:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>, VEX, VEX_WIG;
+ [(store (v2f64 VR128:$src), addr:$dst)]>,
+ VEX, VEX_WIG;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(alignedstore (v8f32 VR256:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>, VEX, VEX_L, VEX_WIG;
+ [(alignedstore (v8f32 VR256:$src), addr:$dst)]>,
+ VEX, VEX_L, VEX_WIG;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore (v4f64 VR256:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>, VEX, VEX_L, VEX_WIG;
+ [(alignedstore (v4f64 VR256:$src), addr:$dst)]>,
+ VEX, VEX_L, VEX_WIG;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movups\t{$src, $dst|$dst, $src}",
- [(store (v8f32 VR256:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>, VEX, VEX_L, VEX_WIG;
+ [(store (v8f32 VR256:$src), addr:$dst)]>,
+ VEX, VEX_L, VEX_WIG;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
"movupd\t{$src, $dst|$dst, $src}",
- [(store (v4f64 VR256:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>, VEX, VEX_L, VEX_WIG;
+ [(store (v4f64 VR256:$src), addr:$dst)]>,
+ VEX, VEX_L, VEX_WIG;
} // SchedRW
// For disassembler
@@ -733,44 +732,36 @@ let isCodeGenOnly = 1, ForceDisassemble
SchedRW = [WriteFMove] in {
def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movaps\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX, VEX_WIG,
- FoldGenData<"VMOVAPSrr">;
+ "movaps\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_WIG, FoldGenData<"VMOVAPSrr">;
def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX, VEX_WIG,
- FoldGenData<"VMOVAPDrr">;
+ "movapd\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_WIG, FoldGenData<"VMOVAPDrr">;
def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movups\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, VEX, VEX_WIG,
- FoldGenData<"VMOVUPSrr">;
+ "movups\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_WIG, FoldGenData<"VMOVUPSrr">;
def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
(ins VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, VEX, VEX_WIG,
- FoldGenData<"VMOVUPDrr">;
+ "movupd\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_WIG, FoldGenData<"VMOVUPDrr">;
def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movaps\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX, VEX_L, VEX_WIG,
- FoldGenData<"VMOVAPSYrr">;
+ "movaps\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPSYrr">;
def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movapd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX, VEX_L, VEX_WIG,
- FoldGenData<"VMOVAPDYrr">;
+ "movapd\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVAPDYrr">;
def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movups\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, VEX, VEX_L, VEX_WIG,
- FoldGenData<"VMOVUPSYrr">;
+ "movups\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPSYrr">;
def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
(ins VR256:$src),
- "movupd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, VEX, VEX_L, VEX_WIG,
- FoldGenData<"VMOVUPDYrr">;
+ "movupd\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVUPDYrr">;
}
// Aliases to help the assembler pick two byte VEX encodings by swapping the
@@ -795,37 +786,33 @@ def : InstAlias<"vmovupd\t{$src, $dst|$d
let SchedRW = [WriteFStore] in {
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(alignedstore (v4f32 VR128:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>;
+ [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore (v2f64 VR128:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>;
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
- [(store (v4f32 VR128:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>;
+ [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movupd\t{$src, $dst|$dst, $src}",
- [(store (v2f64 VR128:$src), addr:$dst)],
- IIC_SSE_MOVU_P_MR>;
+ [(store (v2f64 VR128:$src), addr:$dst)]>;
} // SchedRW
// For disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
SchedRW = [WriteFMove] in {
def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movaps\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, FoldGenData<"MOVAPSrr">;
+ "movaps\t{$src, $dst|$dst, $src}", []>,
+ FoldGenData<"MOVAPSrr">;
def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, FoldGenData<"MOVAPDrr">;
+ "movapd\t{$src, $dst|$dst, $src}", []>,
+ FoldGenData<"MOVAPDrr">;
def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movups\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, FoldGenData<"MOVUPSrr">;
+ "movups\t{$src, $dst|$dst, $src}", []>,
+ FoldGenData<"MOVUPSrr">;
def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, FoldGenData<"MOVUPDrr">;
+ "movupd\t{$src, $dst|$dst, $src}", []>,
+ FoldGenData<"MOVUPDrr">;
}
let Predicates = [HasAVX, NoVLX] in {
@@ -886,15 +873,14 @@ let Predicates = [UseSSE1] in {
//===----------------------------------------------------------------------===//
multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
- string base_opc, string asm_opr,
- InstrItinClass itin> {
+ string base_opc, string asm_opr> {
def PSrm : PI<opc, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
!strconcat(base_opc, "s", asm_opr),
[(set VR128:$dst,
(psnode VR128:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
- itin, SSEPackedSingle>, PS,
+ NoItinerary, SSEPackedSingle>, PS,
Sched<[WriteFShuffleLd, ReadAfterLd]>;
def PDrm : PI<opc, MRMSrcMem,
@@ -902,52 +888,48 @@ multiclass sse12_mov_hilo_packed_base<bi
!strconcat(base_opc, "d", asm_opr),
[(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
(scalar_to_vector (loadf64 addr:$src2)))))],
- itin, SSEPackedDouble>, PD,
+ NoItinerary, SSEPackedDouble>, PD,
Sched<[WriteFShuffleLd, ReadAfterLd]>;
}
multiclass sse12_mov_hilo_packed<bits<8>opc, SDNode psnode, SDNode pdnode,
- string base_opc, InstrItinClass itin> {
+ string base_opc> {
let Predicates = [UseAVX] in
defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- itin>, VEX_4V, VEX_WIG;
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
+ VEX_4V, VEX_WIG;
let Constraints = "$src1 = $dst" in
defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
- "\t{$src2, $dst|$dst, $src2}",
- itin>;
+ "\t{$src2, $dst|$dst, $src2}">;
}
let AddedComplexity = 20 in {
- defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
- IIC_SSE_MOV_LH>;
+ defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp">;
}
let SchedRW = [WriteStore] in {
let Predicates = [UseAVX] in {
def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movlps\t{$src, $dst|$dst, $src}",
- [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOV_LH>, VEX, VEX_WIG;
+ "movlps\t{$src, $dst|$dst, $src}",
+ [(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
+ (iPTR 0))), addr:$dst)]>,
+ VEX, VEX_WIG;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movlpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (extractelt (v2f64 VR128:$src),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOV_LH>, VEX, VEX_WIG;
+ "movlpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (extractelt (v2f64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>,
+ VEX, VEX_WIG;
}// UseAVX
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlps\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt (bc_v2f64 (v4f32 VR128:$src)),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOV_LH>;
+ (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlpd\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt (v2f64 VR128:$src),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOV_LH>;
+ (iPTR 0))), addr:$dst)]>;
} // SchedRW
let Predicates = [UseAVX] in {
@@ -1009,8 +991,7 @@ let Predicates = [UseSSE2] in {
//===----------------------------------------------------------------------===//
let AddedComplexity = 20 in {
- defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Unpckl, "movhp",
- IIC_SSE_MOV_LH>;
+ defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Unpckl, "movhp">;
}
let SchedRW = [WriteStore] in {
@@ -1022,24 +1003,24 @@ def VMOVHPSmr : VPSI<0x17, MRMDestMem, (
[(store (f64 (extractelt
(X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
(bc_v2f64 (v4f32 VR128:$src))),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX, VEX_WIG;
+ (iPTR 0))), addr:$dst)]>, VEX, VEX_WIG;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhpd\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt
(v2f64 (X86Unpckh VR128:$src, VR128:$src)),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX, VEX_WIG;
+ (iPTR 0))), addr:$dst)]>, VEX, VEX_WIG;
} // UseAVX
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhps\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt
(X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
(bc_v2f64 (v4f32 VR128:$src))),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
+ (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhpd\t{$src, $dst|$dst, $src}",
[(store (f64 (extractelt
(v2f64 (X86Unpckh VR128:$src, VR128:$src)),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
+ (iPTR 0))), addr:$dst)]>;
} // SchedRW
let Predicates = [UseAVX] in {
@@ -1107,15 +1088,13 @@ let AddedComplexity = 20, Predicates = [
(ins VR128:$src1, VR128:$src2),
"movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
- IIC_SSE_MOV_LH>,
+ (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))]>,
VEX_4V, Sched<[WriteFShuffle]>, VEX_WIG;
def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
- IIC_SSE_MOV_LH>,
+ (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
VEX_4V, Sched<[WriteFShuffle]>, VEX_WIG;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
@@ -1123,15 +1102,15 @@ let Constraints = "$src1 = $dst", AddedC
(ins VR128:$src1, VR128:$src2),
"movlhps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
+ (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))]>,
+ Sched<[WriteFShuffle]>;
let isCommutable = 1 in
def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
"movhlps\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
+ (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))]>,
+ Sched<[WriteFShuffle]>;
}
//===----------------------------------------------------------------------===//
@@ -1558,14 +1537,14 @@ def : InstAlias<"cvtsd2si{q}\t{$src, $ds
let hasSideEffects = 0, Predicates = [UseAVX] in {
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
(ins FR32:$src1, FR64:$src2),
- "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
- IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
+ "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX_4V, VEX_LIG,
Sched<[WriteCvtF2F]>, VEX_WIG, NotMemoryFoldable;
let mayLoad = 1 in
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
(ins FR32:$src1, f64mem:$src2),
- "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [], IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, VEX_LIG,
+ "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ XD, VEX_4V, VEX_LIG,
Sched<[WriteCvtF2FLd, ReadAfterLd]>, VEX_WIG, NotMemoryFoldable;
}
@@ -1575,12 +1554,11 @@ def : Pat<(f32 (fpround FR64:$src)),
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (fpround FR64:$src))],
- IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
+ [(set FR32:$dst, (fpround FR64:$src))]>,
+ Sched<[WriteCvtF2F]>;
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (fpround (loadf64 addr:$src)))],
- IIC_SSE_CVT_Scalar_RM>,
+ [(set FR32:$dst, (fpround (loadf64 addr:$src)))]>,
XD,
Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
@@ -1589,15 +1567,15 @@ def VCVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, VEX_WIG,
+ (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))]>,
+ XD, VEX_4V, VEX_WIG,
Requires<[HasAVX]>, Sched<[WriteCvtF2F]>;
def VCVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsd2ss
- VR128:$src1, sse_load_f64:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, VEX_WIG,
+ VR128:$src1, sse_load_f64:$src2))]>,
+ XD, VEX_4V, VEX_WIG,
Requires<[HasAVX]>, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in {
@@ -1605,15 +1583,14 @@ def CVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"cvtsd2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
- Sched<[WriteCvtF2F]>;
+ (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))]>,
+ XD, Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
def CVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
"cvtsd2ss\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst, (int_x86_sse2_cvtsd2ss
- VR128:$src1, sse_load_f64:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
+ VR128:$src1, sse_load_f64:$src2))]>,
+ XD, Requires<[UseSSE2]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
} // isCodeGenOnly = 1
@@ -1623,14 +1600,14 @@ def CVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
let hasSideEffects = 0, Predicates = [UseAVX] in {
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
(ins FR64:$src1, FR32:$src2),
- "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [], IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, VEX_LIG,
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ XS, VEX_4V, VEX_LIG,
Sched<[WriteCvtF2F]>, VEX_WIG, NotMemoryFoldable;
let mayLoad = 1 in
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
(ins FR64:$src1, f32mem:$src2),
- "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [], IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, VEX_LIG,
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ XS, VEX_4V, VEX_LIG,
Sched<[WriteCvtF2FLd, ReadAfterLd]>, VEX_WIG, NotMemoryFoldable;
}
@@ -1648,14 +1625,12 @@ def : Pat<(extloadf32 addr:$src),
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (fpextend FR32:$src))],
- IIC_SSE_CVT_Scalar_RR>, XS,
- Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
+ [(set FR64:$dst, (fpextend FR32:$src))]>,
+ XS, Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (extloadf32 addr:$src))],
- IIC_SSE_CVT_Scalar_RM>, XS,
- Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
+ [(set FR64:$dst, (extloadf32 addr:$src))]>,
+ XS, Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
// extload f32 -> f64. This matches load+fpextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
@@ -1672,30 +1647,30 @@ def VCVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, VEX_WIG,
+ (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))]>,
+ XS, VEX_4V, VEX_WIG,
Requires<[HasAVX]>, Sched<[WriteCvtF2F]>;
def VCVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, VEX_WIG,
+ (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))]>,
+ XS, VEX_4V, VEX_WIG,
Requires<[HasAVX]>, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def CVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
- IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
+ (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))]>,
+ XS, Requires<[UseSSE2]>,
Sched<[WriteCvtF2F]>;
def CVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
"cvtss2sd\t{$src2, $dst|$dst, $src2}",
[(set VR128:$dst,
- (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
- IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
+ (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))]>,
+ XS, Requires<[UseSSE2]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
} // isCodeGenOnly = 1
@@ -1777,33 +1752,33 @@ let Predicates = [HasAVX, NoVLX] in {
// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))],
- IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>, VEX_WIG;
+ [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))]>,
+ VEX, Sched<[WriteCvtF2I]>, VEX_WIG;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvtp2Int (loadv4f32 addr:$src))))],
- IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>, VEX_WIG;
+ (v4i32 (X86cvtp2Int (loadv4f32 addr:$src))))]>,
+ VEX, Sched<[WriteCvtF2ILd]>, VEX_WIG;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (v8i32 (X86cvtp2Int (v8f32 VR256:$src))))],
- IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>, VEX_WIG;
+ (v8i32 (X86cvtp2Int (v8f32 VR256:$src))))]>,
+ VEX, VEX_L, Sched<[WriteCvtF2I]>, VEX_WIG;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (v8i32 (X86cvtp2Int (loadv8f32 addr:$src))))],
- IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>, VEX_WIG;
+ (v8i32 (X86cvtp2Int (loadv8f32 addr:$src))))]>,
+ VEX, VEX_L, Sched<[WriteCvtF2ILd]>, VEX_WIG;
}
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))],
- IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
+ [(set VR128:$dst, (v4i32 (X86cvtp2Int (v4f32 VR128:$src))))]>,
+ Sched<[WriteCvtF2I]>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvtp2Int (memopv4f32 addr:$src))))],
- IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
+ (v4i32 (X86cvtp2Int (memopv4f32 addr:$src))))]>,
+ Sched<[WriteCvtF2ILd]>;
// Convert Packed Double FP to Packed DW Integers
@@ -1848,13 +1823,13 @@ def : InstAlias<"vcvtpd2dqy\t{$src, $dst
def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvtp2Int (memopv2f64 addr:$src))))],
- IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
+ (v4i32 (X86cvtp2Int (memopv2f64 addr:$src))))]>,
+ Sched<[WriteCvtF2ILd]>;
def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvtp2Int (v2f64 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
+ (v4i32 (X86cvtp2Int (v2f64 VR128:$src))))]>,
+ Sched<[WriteCvtF2I]>;
// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
@@ -1862,43 +1837,43 @@ let Predicates = [HasAVX, NoVLX] in {
def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (fp_to_sint (v4f32 VR128:$src))))],
- IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>, VEX_WIG;
+ (v4i32 (fp_to_sint (v4f32 VR128:$src))))]>,
+ VEX, Sched<[WriteCvtF2I]>, VEX_WIG;
def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (fp_to_sint (loadv4f32 addr:$src))))],
- IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>, VEX_WIG;
+ (v4i32 (fp_to_sint (loadv4f32 addr:$src))))]>,
+ VEX, Sched<[WriteCvtF2ILd]>, VEX_WIG;
def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (v8i32 (fp_to_sint (v8f32 VR256:$src))))],
- IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>, VEX_WIG;
+ (v8i32 (fp_to_sint (v8f32 VR256:$src))))]>,
+ VEX, VEX_L, Sched<[WriteCvtF2I]>, VEX_WIG;
def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (v8i32 (fp_to_sint (loadv8f32 addr:$src))))],
- IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
+ (v8i32 (fp_to_sint (loadv8f32 addr:$src))))]>,
+ VEX, VEX_L,
Sched<[WriteCvtF2ILd]>, VEX_WIG;
}
def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (fp_to_sint (v4f32 VR128:$src))))],
- IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
+ (v4i32 (fp_to_sint (v4f32 VR128:$src))))]>,
+ Sched<[WriteCvtF2I]>;
def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (fp_to_sint (memopv4f32 addr:$src))))],
- IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
+ (v4i32 (fp_to_sint (memopv4f32 addr:$src))))]>,
+ Sched<[WriteCvtF2ILd]>;
let Predicates = [HasAVX, NoVLX] in
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvttp2si (v2f64 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>, VEX_WIG;
+ (v4i32 (X86cvttp2si (v2f64 VR128:$src))))]>,
+ VEX, Sched<[WriteCvtF2I]>, VEX_WIG;
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
@@ -1911,8 +1886,8 @@ let Predicates = [HasAVX, NoVLX] in
def VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttpd2dq{x}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvttp2si (loadv2f64 addr:$src))))],
- IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>, VEX_WIG;
+ (v4i32 (X86cvttp2si (loadv2f64 addr:$src))))]>,
+ VEX, Sched<[WriteCvtF2ILd]>, VEX_WIG;
def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
(VCVTTPD2DQrm VR128:$dst, f128mem:$src), 0>;
@@ -1921,13 +1896,13 @@ let Predicates = [HasAVX, NoVLX] in {
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (fp_to_sint (v4f64 VR256:$src))))],
- IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>, VEX_WIG;
+ (v4i32 (fp_to_sint (v4f64 VR256:$src))))]>,
+ VEX, VEX_L, Sched<[WriteCvtF2I]>, VEX_WIG;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (fp_to_sint (loadv4f64 addr:$src))))],
- IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>, VEX_WIG;
+ (v4i32 (fp_to_sint (loadv4f64 addr:$src))))]>,
+ VEX, VEX_L, Sched<[WriteCvtF2ILd]>, VEX_WIG;
}
def : InstAlias<"vcvttpd2dqy\t{$src, $dst|$dst, $src}",
(VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
@@ -1954,13 +1929,13 @@ let Predicates = [HasAVX, NoVLX] in {
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvttp2si (v2f64 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
+ (v4i32 (X86cvttp2si (v2f64 VR128:$src))))]>,
+ Sched<[WriteCvtF2I]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (X86cvttp2si (memopv2f64 addr:$src))))],
- IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
+ (v4i32 (X86cvttp2si (memopv2f64 addr:$src))))]>,
+ Sched<[WriteCvtF2ILd]>;
let Predicates = [UseSSE2] in {
let AddedComplexity = 15 in {
@@ -1984,31 +1959,31 @@ let Predicates = [HasAVX, NoVLX] in {
// SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2f64 (X86vfpext (v4f32 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>, VEX_WIG;
+ [(set VR128:$dst, (v2f64 (X86vfpext (v4f32 VR128:$src))))]>,
+ PS, VEX, Sched<[WriteCvtF2F]>, VEX_WIG;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>, VEX_WIG;
+ [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
+ PS, VEX, Sched<[WriteCvtF2FLd]>, VEX_WIG;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst, (v4f64 (fpextend (v4f32 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>, VEX_WIG;
+ [(set VR256:$dst, (v4f64 (fpextend (v4f32 VR128:$src))))]>,
+ PS, VEX, VEX_L, Sched<[WriteCvtF2F]>, VEX_WIG;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR256:$dst, (v4f64 (extloadv4f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>, VEX_WIG;
+ [(set VR256:$dst, (v4f64 (extloadv4f32 addr:$src)))]>,
+ PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>, VEX_WIG;
}
let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2f64 (X86vfpext (v4f32 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
+ [(set VR128:$dst, (v2f64 (X86vfpext (v4f32 VR128:$src))))]>,
+ PS, Sched<[WriteCvtF2F]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
+ [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))]>,
+ PS, Sched<[WriteCvtF2FLd]>;
}
// Convert Packed DW Integers to Packed Double FP
@@ -2017,38 +1992,37 @@ let hasSideEffects = 0, mayLoad = 1 in
def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2f64 (X86VSintToFP (bc_v4i32 (loadv2i64 addr:$src)))))],
- IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtI2FLd]>, VEX_WIG;
+ (v2f64 (X86VSintToFP (bc_v4i32 (loadv2i64 addr:$src)))))]>,
+ VEX, Sched<[WriteCvtI2FLd]>, VEX_WIG;
def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2f64 (X86VSintToFP (v4i32 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtI2F]>, VEX_WIG;
+ (v2f64 (X86VSintToFP (v4i32 VR128:$src))))]>,
+ VEX, Sched<[WriteCvtI2F]>, VEX_WIG;
def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))))],
- IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtI2FLd]>,
+ (v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))))]>,
+ VEX, VEX_L, Sched<[WriteCvtI2FLd]>,
VEX_WIG;
def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
- (v4f64 (sint_to_fp (v4i32 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtI2F]>,
- VEX_WIG;
+ (v4f64 (sint_to_fp (v4i32 VR128:$src))))]>,
+ VEX, VEX_L, Sched<[WriteCvtI2F]>, VEX_WIG;
}
let hasSideEffects = 0, mayLoad = 1 in
def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2f64 (X86VSintToFP (bc_v4i32 (loadv2i64 addr:$src)))))],
- IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2FLd]>;
+ (v2f64 (X86VSintToFP (bc_v4i32 (loadv2i64 addr:$src)))))]>,
+ Sched<[WriteCvtI2FLd]>;
def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2f64 (X86VSintToFP (v4i32 VR128:$src))))],
- IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2F]>;
+ (v2f64 (X86VSintToFP (v4i32 VR128:$src))))]>,
+ Sched<[WriteCvtI2F]>;
// AVX register conversion intrinsics
let Predicates = [HasAVX, NoVLX] in {
@@ -2073,8 +2047,8 @@ let Predicates = [UseSSE2] in {
let Predicates = [HasAVX, NoVLX] in
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (X86vfpround (v2f64 VR128:$src)))],
- IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>, VEX_WIG;
+ [(set VR128:$dst, (X86vfpround (v2f64 VR128:$src)))]>,
+ VEX, Sched<[WriteCvtF2F]>, VEX_WIG;
// XMM only
def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
@@ -2082,8 +2056,8 @@ def : InstAlias<"vcvtpd2psx\t{$src, $dst
let Predicates = [HasAVX, NoVLX] in
def VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2ps{x}\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (X86vfpround (loadv2f64 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>, VEX_WIG;
+ [(set VR128:$dst, (X86vfpround (loadv2f64 addr:$src)))]>,
+ VEX, Sched<[WriteCvtF2FLd]>, VEX_WIG;
def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
(VCVTPD2PSrm VR128:$dst, f128mem:$src), 0>;
@@ -2091,12 +2065,12 @@ def : InstAlias<"vcvtpd2psx\t{$src, $dst
let Predicates = [HasAVX, NoVLX] in {
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (fpround VR256:$src))],
- IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>, VEX_WIG;
+ [(set VR128:$dst, (fpround VR256:$src))]>,
+ VEX, VEX_L, Sched<[WriteCvtF2F]>, VEX_WIG;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
"cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (fpround (loadv4f64 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>, VEX_WIG;
+ [(set VR128:$dst, (fpround (loadv4f64 addr:$src)))]>,
+ VEX, VEX_L, Sched<[WriteCvtF2FLd]>, VEX_WIG;
}
def : InstAlias<"vcvtpd2psy\t{$src, $dst|$dst, $src}",
(VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
@@ -2105,12 +2079,12 @@ def : InstAlias<"vcvtpd2psy\t{$src, $dst
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (X86vfpround (v2f64 VR128:$src)))],
- IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
+ [(set VR128:$dst, (X86vfpround (v2f64 VR128:$src)))]>,
+ Sched<[WriteCvtF2F]>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (X86vfpround (memopv2f64 addr:$src)))],
- IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;
+ [(set VR128:$dst, (X86vfpround (memopv2f64 addr:$src)))]>,
+ Sched<[WriteCvtF2FLd]>;
// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
@@ -2169,12 +2143,11 @@ multiclass sse12_cmp_scalar<RegisterClas
// Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1, hasSideEffects = 0 in {
def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, [],
- IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
+ (ins RC:$src1, RC:$src2, u8imm:$cc), asm_alt, []>,
+ Sched<[itins.Sched]>;
let mayLoad = 1 in
def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, [],
- IIC_SSE_ALU_F32S_RM>,
+ (ins RC:$src1, x86memop:$src2, u8imm:$cc), asm_alt, []>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
}
@@ -2593,7 +2566,7 @@ multiclass sse12_extr_sign_mask<Register
string asm, Domain d> {
def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
- [(set GR32orGR64:$dst, (X86movmsk (vt RC:$src)))], IIC_SSE_MOVMSK, d>,
+ [(set GR32orGR64:$dst, (X86movmsk (vt RC:$src)))], NoItinerary, d>,
Sched<[WriteFMOVMSK]>;
}
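The sse12_extr_sign_mask hunk above shows the one variation on that pattern: when the itinerary is not the final operand (here a Domain operand follows it), the slot cannot simply be dropped, so NoItinerary is substituted in place. A hypothetical def of the same shape:

// PI takes (..., pattern, itin, Domain); with the Domain operand
// trailing the itinerary, the slot is kept but neutralized.
def HYPOrr : PI<0x00, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
                "hypo\t{$src, $dst|$dst, $src}",
                [(set GR32orGR64:$dst, (HypoNode (v4f32 VR128:$src)))],
                NoItinerary, SSEPackedSingle>,
             Sched<[WriteFMOVMSK]>;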
@@ -3541,98 +3514,89 @@ let ExeDomain = SSEPackedInt in { // SSE
let hasSideEffects = 0, SchedRW = [WriteVecMove] in {
def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
- VEX, VEX_WIG;
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX, VEX_WIG;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>,
- VEX, VEX_L, VEX_WIG;
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L, VEX_WIG;
def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
- VEX, VEX_WIG;
+ "movdqu\t{$src, $dst|$dst, $src}", []>, VEX, VEX_WIG;
def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>,
- VEX, VEX_L, VEX_WIG;
+ "movdqu\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L, VEX_WIG;
}
// For Disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
SchedRW = [WriteVecMove] in {
def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>,
+ "movdqa\t{$src, $dst|$dst, $src}", []>,
VEX, VEX_WIG, FoldGenData<"VMOVDQArr">;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, VEX, VEX_L, VEX_WIG,
- FoldGenData<"VMOVDQAYrr">;
+ "movdqa\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQAYrr">;
def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqu\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>,
+ "movdqu\t{$src, $dst|$dst, $src}", []>,
VEX, VEX_WIG, FoldGenData<"VMOVDQUrr">;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
- "movdqu\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVU_P_RR>, VEX, VEX_L, VEX_WIG,
- FoldGenData<"VMOVDQUYrr">;
+ "movdqu\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG, FoldGenData<"VMOVDQUYrr">;
}
let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
hasSideEffects = 0, SchedRW = [WriteVecLoad], Predicates = [HasAVX,NoVLX] in {
def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "movdqa\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (alignedloadv2i64 addr:$src))],
- IIC_SSE_MOVA_P_RM>, VEX, VEX_WIG;
+ "movdqa\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (alignedloadv2i64 addr:$src))]>,
+ VEX, VEX_WIG;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
- VEX, VEX_L, VEX_WIG;
+ "movdqa\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG;
def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (loadv2i64 addr:$src))],
- IIC_SSE_MOVU_P_RM>, XS, VEX, VEX_WIG;
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (loadv2i64 addr:$src))]>,
+ XS, VEX, VEX_WIG;
def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
- XS, VEX, VEX_L, VEX_WIG;
+ "vmovdqu\t{$src, $dst|$dst, $src}", []>,
+ XS, VEX, VEX_L, VEX_WIG;
}
let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteVecStore],
Predicates = [HasAVX,NoVLX] in {
def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
- (ins i128mem:$dst, VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}",
- [(alignedstore (v2i64 VR128:$src), addr:$dst)],
- IIC_SSE_MOVA_P_MR>, VEX, VEX_WIG;
+ (ins i128mem:$dst, VR128:$src),
+ "movdqa\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v2i64 VR128:$src), addr:$dst)]>,
+ VEX, VEX_WIG;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
- (ins i256mem:$dst, VR256:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
- VEX, VEX_L, VEX_WIG;
+ (ins i256mem:$dst, VR256:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>,
+ VEX, VEX_L, VEX_WIG;
def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",
- [(store (v2i64 VR128:$src), addr:$dst)], IIC_SSE_MOVU_P_MR>,
- XS, VEX, VEX_WIG;
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [(store (v2i64 VR128:$src), addr:$dst)]>,
+ XS, VEX, VEX_WIG;
def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
- "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
- XS, VEX, VEX_L, VEX_WIG;
+ "vmovdqu\t{$src, $dst|$dst, $src}",[]>,
+ XS, VEX, VEX_L, VEX_WIG;
}
let SchedRW = [WriteVecMove] in {
let hasSideEffects = 0 in {
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;
+ "movdqa\t{$src, $dst|$dst, $src}", []>;
def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqu\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
+ "movdqu\t{$src, $dst|$dst, $src}", []>,
+ XS, Requires<[UseSSE2]>;
}
// For Disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVA_P_RR>, FoldGenData<"MOVDQArr">;
+ "movdqa\t{$src, $dst|$dst, $src}", []>,
+ FoldGenData<"MOVDQArr">;
def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movdqu\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>,
- FoldGenData<"MOVDQUrr">;
+ "movdqu\t{$src, $dst|$dst, $src}", []>,
+ XS, Requires<[UseSSE2]>, FoldGenData<"MOVDQUrr">;
}
} // SchedRW
@@ -3640,24 +3604,20 @@ let canFoldAsLoad = 1, mayLoad = 1, isRe
hasSideEffects = 0, SchedRW = [WriteVecLoad] in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
- [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
- IIC_SSE_MOVA_P_RM>;
+ [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
- IIC_SSE_MOVU_P_RM>,
+ [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
XS, Requires<[UseSSE2]>;
}
let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteVecStore] in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}",
- [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
- IIC_SSE_MOVA_P_MR>;
+ [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
- [/*(store (v2i64 VR128:$src), addr:$dst)*/],
- IIC_SSE_MOVU_P_MR>,
+ [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
XS, Requires<[UseSSE2]>;
}
@@ -3861,8 +3821,8 @@ multiclass PDI_binop_ri<bits<8> opc, For
!if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
- [(set RC:$dst, (VT (OpNode RC:$src1, (i8 imm:$src2))))],
- IIC_SSE_INTSHDQ_P_RI>, Sched<[WriteVecShift]>;
+ [(set RC:$dst, (VT (OpNode RC:$src1, (i8 imm:$src2))))]>,
+ Sched<[WriteVecShift]>;
}
multiclass PDI_binop_ri_all<bits<8> opc, Format ImmForm, string OpcodeStr,
@@ -4212,8 +4172,8 @@ multiclass sse2_pinsrw<bit Is2Addr = 1>
"pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
- (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
- IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
+ (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
+ Sched<[WriteShuffle]>;
def rm : Ii8<0xC4, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1,
i16mem:$src2, u8imm:$src3),
@@ -4222,7 +4182,7 @@ multiclass sse2_pinsrw<bit Is2Addr = 1>
"vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
- imm:$src3))], IIC_SSE_PINSRW>,
+ imm:$src3))]>,
Sched<[WriteShuffleLd, ReadAfterLd]>;
}
@@ -4232,13 +4192,13 @@ def VPEXTRWrr : Ii8<0xC5, MRMSrcReg,
(outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
"vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
- imm:$src2))], IIC_SSE_PEXTRW>,
+ imm:$src2))]>,
PD, VEX, Sched<[WriteShuffle]>;
def PEXTRWrr : PDIi8<0xC5, MRMSrcReg,
(outs GR32orGR64:$dst), (ins VR128:$src1, u8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
- imm:$src2))], IIC_SSE_PEXTRW>,
+ imm:$src2))]>,
Sched<[WriteShuffle]>;
// Insert
@@ -4259,21 +4219,20 @@ let ExeDomain = SSEPackedInt, SchedRW =
def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
- IIC_SSE_MOVMSK>, VEX, VEX_WIG;
+ [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))]>,
+ VEX, VEX_WIG;
let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
(ins VR256:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))],
- IIC_SSE_MOVMSK>, VEX, VEX_L, VEX_WIG;
+ [(set GR32orGR64:$dst, (X86movmsk (v32i8 VR256:$src)))]>,
+ VEX, VEX_L, VEX_WIG;
}
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
"pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))],
- IIC_SSE_MOVMSK>;
+ [(set GR32orGR64:$dst, (X86movmsk (v16i8 VR128:$src)))]>;
} // ExeDomain = SSEPackedInt
@@ -4287,25 +4246,23 @@ let Uses = [EDI], Predicates = [HasAVX,N
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
- IIC_SSE_MASKMOV>, VEX, VEX_WIG;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>,
+ VEX, VEX_WIG;
let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
(ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
- IIC_SSE_MASKMOV>, VEX, VEX_WIG;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>,
+ VEX, VEX_WIG;
let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
- IIC_SSE_MASKMOV>;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
"maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)],
- IIC_SSE_MASKMOV>;
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
} // ExeDomain = SSEPackedInt
@@ -4318,55 +4275,54 @@ def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg,
//
let ExeDomain = SSEPackedInt in {
def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
- VEX, Sched<[WriteMove]>;
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v4i32 (scalar_to_vector GR32:$src)))]>,
+ VEX, Sched<[WriteMove]>;
def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
- IIC_SSE_MOVDQ>,
- VEX, Sched<[WriteLoad]>;
-def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "movq\t{$src, $dst|$dst, $src}",
+ "movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2i64 (scalar_to_vector GR64:$src)))],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
+ VEX, Sched<[WriteLoad]>;
+def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (scalar_to_vector GR64:$src)))]>,
+ VEX, Sched<[WriteMove]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteLoad]>;
+ "movq\t{$src, $dst|$dst, $src}", []>,
+ VEX, Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in
def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert GR64:$src))],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (bitconvert GR64:$src))]>,
+ VEX, Sched<[WriteMove]>;
def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
- Sched<[WriteMove]>;
+ (v4i32 (scalar_to_vector GR32:$src)))]>,
+ Sched<[WriteMove]>;
def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
- IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
+ Sched<[WriteLoad]>;
def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2i64 (scalar_to_vector GR64:$src)))],
- IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
+ (v2i64 (scalar_to_vector GR64:$src)))]>,
+ Sched<[WriteMove]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+ "movq\t{$src, $dst|$dst, $src}", []>,
+ Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in
def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert GR64:$src))],
- IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
+ [(set FR64:$dst, (bitconvert GR64:$src))]>,
+ Sched<[WriteMove]>;
} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
@@ -4375,23 +4331,22 @@ def MOV64toSDrr : RS2I<0x6E, MRMSrcReg,
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert GR32:$src))],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
+ [(set FR32:$dst, (bitconvert GR32:$src))]>,
+ VEX, Sched<[WriteMove]>;
def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
- IIC_SSE_MOVDQ>,
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
VEX, Sched<[WriteLoad]>;
def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert GR32:$src))],
- IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
+ [(set FR32:$dst, (bitconvert GR32:$src))]>,
+ Sched<[WriteMove]>;
def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
- IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
+ Sched<[WriteLoad]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
//===---------------------------------------------------------------------===//
@@ -4399,26 +4354,26 @@ let ExeDomain = SSEPackedInt, isCodeGenO
//
let ExeDomain = SSEPackedInt in {
def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
- (iPTR 0)))], IIC_SSE_MOVD_ToGP>, VEX,
- Sched<[WriteMove]>;
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (extractelt (v4i32 VR128:$src),
+ (iPTR 0)))]>, VEX,
+ Sched<[WriteMove]>;
def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
- (ins i32mem:$dst, VR128:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(store (i32 (extractelt (v4i32 VR128:$src),
- (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
- VEX, Sched<[WriteStore]>;
+ (ins i32mem:$dst, VR128:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(store (i32 (extractelt (v4i32 VR128:$src),
+ (iPTR 0))), addr:$dst)]>,
+ VEX, Sched<[WriteStore]>;
def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (extractelt (v4i32 VR128:$src),
- (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
+ (iPTR 0)))]>,
Sched<[WriteMove]>;
def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (extractelt (v4i32 VR128:$src),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+ (iPTR 0))), addr:$dst)]>,
+ Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int first element to Doubleword Int
@@ -4428,26 +4383,24 @@ let SchedRW = [WriteMove] in {
def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (extractelt (v2i64 VR128:$src),
- (iPTR 0)))],
- IIC_SSE_MOVD_ToGP>,
+ (iPTR 0)))]>,
VEX;
def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (extractelt (v2i64 VR128:$src),
- (iPTR 0)))],
- IIC_SSE_MOVD_ToGP>;
+ (iPTR 0)))]>;
} //SchedRW
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def VMOVPQIto64mr : VRS2I<0x7E, MRMDestMem, (outs),
(ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
+ "movq\t{$src, $dst|$dst, $src}", []>,
+ VEX, Sched<[WriteStore]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def MOVPQIto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+ "movq\t{$src, $dst|$dst, $src}", []>,
+ Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
@@ -4461,25 +4414,25 @@ let ExeDomain = SSEPackedInt, isCodeGenO
VEX, Sched<[WriteLoad]>;
def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bitconvert FR64:$src))],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
+ [(set GR64:$dst, (bitconvert FR64:$src))]>,
+ VEX, Sched<[WriteMove]>;
def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
+ [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>,
+ VEX, Sched<[WriteStore]>;
def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
- IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
+ [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
+ Sched<[WriteLoad]>;
def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (bitconvert FR64:$src))],
- IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
+ [(set GR64:$dst, (bitconvert FR64:$src))]>,
+ Sched<[WriteMove]>;
def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+ [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>,
+ Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
//===---------------------------------------------------------------------===//
@@ -4488,20 +4441,20 @@ let ExeDomain = SSEPackedInt, isCodeGenO
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bitconvert FR32:$src))],
- IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
+ [(set GR32:$dst, (bitconvert FR32:$src))]>,
+ VEX, Sched<[WriteMove]>;
def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>,
+ VEX, Sched<[WriteStore]>;
def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (bitconvert FR32:$src))],
- IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
+ [(set GR32:$dst, (bitconvert FR32:$src))]>,
+ Sched<[WriteMove]>;
def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
- [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>,
+ Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
let Predicates = [UseAVX] in {
@@ -4593,9 +4546,8 @@ def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (o
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
- IIC_SSE_MOVDQ>, XS,
- Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
+ XS, Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
} // ExeDomain, SchedRW
//===---------------------------------------------------------------------===//
@@ -4603,24 +4555,23 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (ou
//
let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {
def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (extractelt (v2i64 VR128:$src),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOVDQ>, VEX, VEX_WIG;
+ "movq\t{$src, $dst|$dst, $src}",
+ [(store (i64 (extractelt (v2i64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>,
+ VEX, VEX_WIG;
def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (extractelt (v2i64 VR128:$src),
- (iPTR 0))), addr:$dst)],
- IIC_SSE_MOVDQ>;
+ (iPTR 0))), addr:$dst)]>;
} // ExeDomain, SchedRW
// For disassembler only
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
SchedRW = [WriteVecLogic] in {
def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>, VEX, VEX_WIG;
+ "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_WIG;
def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
+ "movq\t{$src, $dst|$dst, $src}", []>;
}
// Aliases to help the assembler pick two byte VEX encodings by swapping the
@@ -4662,15 +4613,13 @@ let ExeDomain = SSEPackedInt, SchedRW =
let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vmovq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
- IIC_SSE_MOVQ_RR>,
- XS, VEX, Requires<[UseAVX]>, VEX_WIG;
+ [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
+ XS, VEX, Requires<[UseAVX]>, VEX_WIG;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
- IIC_SSE_MOVQ_RR>,
- XS, Requires<[UseSSE2]>;
+ [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
+ XS, Requires<[UseSSE2]>;
} // ExeDomain, SchedRW
let AddedComplexity = 20 in {
@@ -4693,12 +4642,12 @@ multiclass sse3_replicate_sfp<bits<8> op
X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (vt (OpNode RC:$src)))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
+ [(set RC:$dst, (vt (OpNode RC:$src)))]>,
+ Sched<[WriteFShuffle]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffleLd]>;
+ [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>,
+ Sched<[WriteFShuffleLd]>;
}
let Predicates = [HasAVX, NoVLX] in {
@@ -4759,14 +4708,14 @@ def SSE_MOVDDUP : OpndItins<
multiclass sse3_replicate_dfp<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
+ [(set VR128:$dst, (v2f64 (X86Movddup VR128:$src)))]>,
+ Sched<[WriteFShuffle]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(v2f64 (X86Movddup
- (scalar_to_vector (loadf64 addr:$src)))))],
- IIC_SSE_MOV_LH>, Sched<[WriteFShuffleLd]>;
+ (scalar_to_vector (loadf64 addr:$src)))))]>,
+ Sched<[WriteFShuffleLd]>;
}
// FIXME: Merge with above classes when there are patterns for the ymm version
@@ -5597,8 +5546,8 @@ multiclass SS41I_extract16<bits<8> opc,
def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
(ins VR128:$src1, u8imm:$src2),
!strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [], IIC_SSE_PEXTRW>, Sched<[WriteShuffle]>, FoldGenData<NAME#ri>;
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
+ Sched<[WriteShuffle]>, FoldGenData<NAME#ri>;
let hasSideEffects = 0, mayStore = 1,
SchedRW = [WriteShuffleLd, WriteRMW] in
@@ -7130,7 +7079,7 @@ let Constraints = "$src1 = $dst", Predic
"sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
- (i8 imm:$src3)))], IIC_SSE_INTMUL_P_RR>, TA,
+ (i8 imm:$src3)))]>, TA,
Sched<[WriteVecIMul]>;
def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2, u8imm:$src3),
@@ -7138,7 +7087,7 @@ let Constraints = "$src1 = $dst", Predic
[(set VR128:$dst,
(int_x86_sha1rnds4 VR128:$src1,
(bc_v4i32 (memopv2i64 addr:$src2)),
- (i8 imm:$src3)))], IIC_SSE_INTMUL_P_RM>, TA,
+ (i8 imm:$src3)))]>, TA,
Sched<[WriteVecIMul.Folded, ReadAfterLd]>;
defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte,
@@ -7391,26 +7340,26 @@ def EXTRQI : Ii8<0x78, MRMXr, (outs VR12
(ins VR128:$src, u8imm:$len, u8imm:$idx),
"extrq\t{$idx, $len, $src|$src, $len, $idx}",
[(set VR128:$dst, (X86extrqi VR128:$src, imm:$len,
- imm:$idx))], IIC_SSE_INTALU_P_RR>,
+ imm:$idx))]>,
PD, Sched<[WriteVecALU]>;
def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$mask),
"extrq\t{$mask, $src|$src, $mask}",
[(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
- VR128:$mask))], IIC_SSE_INTALU_P_RR>,
+ VR128:$mask))]>,
PD, Sched<[WriteVecALU]>;
def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$src2, u8imm:$len, u8imm:$idx),
"insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
[(set VR128:$dst, (X86insertqi VR128:$src, VR128:$src2,
- imm:$len, imm:$idx))], IIC_SSE_INTALU_P_RR>,
+ imm:$len, imm:$idx))]>,
XD, Sched<[WriteVecALU]>;
def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src, VR128:$mask),
"insertq\t{$mask, $src|$src, $mask}",
[(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
- VR128:$mask))], IIC_SSE_INTALU_P_RR>,
+ VR128:$mask))]>,
XD, Sched<[WriteVecALU]>;
}
} // ExeDomain = SSEPackedInt
@@ -7630,22 +7579,22 @@ multiclass avx_movmask_rm<bits<8> opc_rm
def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))],
- IIC_SSE_MASKMOV>, VEX_4V, Sched<[WriteFLoad]>;
+ [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
+ VEX_4V, Sched<[WriteFLoad]>;
def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, f256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))],
- IIC_SSE_MASKMOV>, VEX_4V, VEX_L, Sched<[WriteFLoad]>;
+ [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
+ VEX_4V, VEX_L, Sched<[WriteFLoad]>;
def mr : AVX8I<opc_mr, MRMDestMem, (outs),
(ins f128mem:$dst, VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(IntSt addr:$dst, VR128:$src1, VR128:$src2)], IIC_SSE_MASKMOV>,
+ [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>,
VEX_4V, Sched<[WriteFStore]>;
def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
(ins f256mem:$dst, VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)], IIC_SSE_MASKMOV>,
+ [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>,
VEX_4V, VEX_L, Sched<[WriteFStore]>;
}
@@ -8270,22 +8219,22 @@ multiclass avx2_pmovmask<string OpcodeSt
def rm : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))],
- IIC_SSE_MASKMOV>, VEX_4V, Sched<[WriteVecLoad]>;
+ [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>,
+ VEX_4V, Sched<[WriteVecLoad]>;
def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
(ins VR256:$src1, i256mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))],
- IIC_SSE_MASKMOV>, VEX_4V, VEX_L, Sched<[WriteVecLoad]>;
+ [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
+ VEX_4V, VEX_L, Sched<[WriteVecLoad]>;
def mr : AVX28I<0x8e, MRMDestMem, (outs),
(ins i128mem:$dst, VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)], IIC_SSE_MASKMOV>,
+ [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>,
VEX_4V, Sched<[WriteVecStore]>;
def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
(ins i256mem:$dst, VR256:$src1, VR256:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)], IIC_SSE_MASKMOV>,
+ [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>,
VEX_4V, VEX_L, Sched<[WriteVecStore]>;
}
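With the itinerary operands gone from X86InstrSSE.td, per-CPU timing for these defs comes entirely from the subtarget scheduling models resolving the SchedWrite classes named in Sched<[...]>. A minimal sketch of how a model supplies that timing; the model name, port name, and latency are invented for illustration:

def HypoModel : SchedMachineModel;
let SchedModel = HypoModel in {
  def HypoPort0 : ProcResource<1>;
  // One uop on HypoPort0 with an illustrative 4-cycle latency for the
  // generic WriteCvtF2I class referenced by Sched<[WriteCvtF2I]> above.
  def : WriteRes<WriteCvtF2I, [HypoPort0]> { let Latency = 4; }
}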
Modified: llvm/trunk/lib/Target/X86/X86Schedule.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Schedule.td?rev=329940&r1=329939&r2=329940&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86Schedule.td (original)
+++ llvm/trunk/lib/Target/X86/X86Schedule.td Thu Apr 12 12:25:07 2018
@@ -227,12 +227,6 @@ def IIC_SSE_PSHUF_MI : InstrItinClass;
def IIC_SSE_PACK : InstrItinClass;
def IIC_SSE_UNPCK : InstrItinClass;
-def IIC_SSE_MOVMSK : InstrItinClass;
-def IIC_SSE_MASKMOV : InstrItinClass;
-
-def IIC_SSE_PEXTRW : InstrItinClass;
-def IIC_SSE_PINSRW : InstrItinClass;
-
def IIC_SSE_PABS_RR : InstrItinClass;
def IIC_SSE_PABS_RM : InstrItinClass;
@@ -267,10 +261,6 @@ def IIC_SSE_MOVU_P_RR : InstrItinClass;
def IIC_SSE_MOVU_P_RM : InstrItinClass;
def IIC_SSE_MOVU_P_MR : InstrItinClass;
-def IIC_SSE_MOVDQ : InstrItinClass;
-def IIC_SSE_MOVD_ToGP : InstrItinClass;
-def IIC_SSE_MOVQ_RR : InstrItinClass;
-
def IIC_SSE_MOV_LH : InstrItinClass;
def IIC_SSE_PHADDSUBD_RR : InstrItinClass;