[llvm] r319884 - [X86][AVX512] Cleanup scalar move scheduler classes
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 6 03:23:13 PST 2017
Author: rksimon
Date: Wed Dec 6 03:23:13 2017
New Revision: 319884
URL: http://llvm.org/viewvc/llvm-project?rev=319884&view=rev
Log:
[X86][AVX512] Cleanup scalar move scheduler classes
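The patch below attaches explicit scheduler classes to the scalar GPR<->XMM move instructions in X86InstrAVX512.td: register-register forms gain Sched<[WriteMove]>, the load/store forms gain Sched<[WriteLoad]>/Sched<[WriteStore]>, and a couple of codegen-only defs pick up the IIC_SSE_MOVDQ itinerary they were missing. A minimal sketch of the pattern being applied (the def name is hypothetical and the snippet assumes the classes and operand types already defined in this file):

// Illustrative only, not part of the patch: an rr scalar move tagged
// with an explicit scheduler class via Sched<[...]>.
def VMOVEXAMPLEZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
                              "vmovd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>,
                     EVEX, Sched<[WriteMove]>;        // rr form: WriteMove
// Memory forms follow the same shape, using Sched<[WriteLoad]> for loads
// and Sched<[WriteStore]> for stores.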
Modified:
llvm/trunk/lib/Target/X86/X86InstrAVX512.td
Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=319884&r1=319883&r2=319884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Wed Dec 6 03:23:13 2017
@@ -1776,14 +1776,14 @@ defm VPERMT2PD : avx512_perm_t_sizes<0x7
// AVX-512 - BLEND using mask
//
-let Sched = WriteFVarBlend in
-def AVX512_BLENDM : OpndItins<
- IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
+let Sched = WriteFVarBlend in
+def AVX512_BLENDM : OpndItins<
+ IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;
-let Sched = WriteVarBlend in
-def AVX512_PBLENDM : OpndItins<
- IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
+let Sched = WriteVarBlend in
+def AVX512_PBLENDM : OpndItins<
+ IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;
multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, OpndItins itins,
@@ -3533,22 +3533,22 @@ def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSr
"vmovd\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
- EVEX;
+ EVEX, Sched<[WriteMove]>;
def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v4i32 (scalar_to_vector (loadi32 addr:$src))))],
- IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;
+ IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteLoad]>;
def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
- IIC_SSE_MOVDQ>, EVEX, VEX_W;
+ IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def VMOV64toPQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
(ins i64mem:$src),
- "vmovq\t{$src, $dst|$dst, $src}", []>,
- EVEX, VEX_W, EVEX_CD8<64, CD8VT1>;
+ "vmovq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>,
+ EVEX, VEX_W, EVEX_CD8<64, CD8VT1>, Sched<[WriteLoad]>;
let isCodeGenOnly = 1 in {
def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64X:$dst), (ins GR64:$src),
"vmovq\t{$src, $dst|$dst, $src}",
@@ -3557,7 +3557,7 @@ def VMOV64toSDZrr : AVX512BI<0x6E, MRMSr
def VMOV64toSDZrm : AVX512XSI<0x7E, MRMSrcMem, (outs FR64X:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set FR64X:$dst, (bitconvert (loadi64 addr:$src)))]>,
- EVEX, VEX_W, EVEX_CD8<8, CD8VT8>;
+ EVEX, VEX_W, EVEX_CD8<8, CD8VT8>, Sched<[WriteLoad]>;
def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64X:$src))],
@@ -3576,12 +3576,12 @@ let ExeDomain = SSEPackedInt, isCodeGenO
def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert GR32:$src))],
- IIC_SSE_MOVDQ>, EVEX;
+ IIC_SSE_MOVDQ>, EVEX, Sched<[WriteMove]>;
def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
- IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;
+ IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteLoad]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
// Move doubleword from xmm register to r/m32
@@ -3591,13 +3591,13 @@ def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMD
"vmovd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (extractelt (v4i32 VR128X:$src),
(iPTR 0)))], IIC_SSE_MOVD_ToGP>,
- EVEX;
+ EVEX, Sched<[WriteMove]>;
def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, VR128X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(store (i32 (extractelt (v4i32 VR128X:$src),
(iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
- EVEX, EVEX_CD8<32, CD8VT1>;
+ EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt
// Move quadword from xmm1 register to r/m64
@@ -3607,13 +3607,13 @@ def VMOVPQIto64Zrr : I<0x7E, MRMDestReg,
"vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
(iPTR 0)))],
- IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W,
+ IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W, Sched<[WriteMove]>,
Requires<[HasAVX512, In64BitMode]>;
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
def VMOVPQIto64Zmr : I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
- [], IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W,
+ [], IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_W, Sched<[WriteStore]>,
Requires<[HasAVX512, In64BitMode]>;
def VMOVPQI2QIZmr : I<0xD6, MRMDestMem, (outs),
@@ -3627,8 +3627,8 @@ def VMOVPQI2QIZmr : I<0xD6, MRMDestMem,
let hasSideEffects = 0 in
def VMOVPQI2QIZrr : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
(ins VR128X:$src),
- "vmovq.s\t{$src, $dst|$dst, $src}",[]>,
- EVEX, VEX_W;
+ "vmovq.s\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVDQ>,
+ EVEX, VEX_W, Sched<[WriteMove]>;
} // ExeDomain = SSEPackedInt
// Move Scalar Single to Double Int
@@ -3638,12 +3638,12 @@ def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDe
(ins FR32X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bitconvert FR32X:$src))],
- IIC_SSE_MOVD_ToGP>, EVEX;
+ IIC_SSE_MOVD_ToGP>, EVEX, Sched<[WriteMove]>;
def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, FR32X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
- IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>;
+ IIC_SSE_MOVDQ>, EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteStore]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
// Move Quadword Int to Packed Quadword Int
@@ -3654,7 +3654,7 @@ def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMS
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
- EVEX, VEX_W, EVEX_CD8<8, CD8VT8>;
+ EVEX, VEX_W, EVEX_CD8<8, CD8VT8>, Sched<[WriteLoad]>;
} // ExeDomain = SSEPackedInt
//===----------------------------------------------------------------------===//
@@ -3667,7 +3667,7 @@ multiclass avx512_move_scalar<string asm
(ins _.RC:$src1, _.RC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, _.RC:$src2)))],
- _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V;
+ _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, Sched<[WriteMove]>;
def rrkz : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst {${mask}} {z}|",
@@ -3675,7 +3675,7 @@ multiclass avx512_move_scalar<string asm
[(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
(_.VT (OpNode _.RC:$src1, _.RC:$src2)),
_.ImmAllZerosV)))],
- _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_KZ;
+ _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_KZ, Sched<[WriteMove]>;
let Constraints = "$src0 = $dst" in
def rrk : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
(ins _.RC:$src0, _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
@@ -3684,34 +3684,34 @@ multiclass avx512_move_scalar<string asm
[(set _.RC:$dst, (_.VT (X86selects _.KRCWM:$mask,
(_.VT (OpNode _.RC:$src1, _.RC:$src2)),
(_.VT _.RC:$src0))))],
- _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_K;
+ _.ExeDomain,IIC_SSE_MOV_S_RR>, EVEX_4V, EVEX_K, Sched<[WriteMove]>;
let canFoldAsLoad = 1, isReMaterializable = 1 in
def rm : AVX512PI<0x10, MRMSrcMem, (outs _.FRC:$dst), (ins _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(set _.FRC:$dst, (_.ScalarLdFrag addr:$src))],
- _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX;
+ _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, Sched<[WriteLoad]>;
let mayLoad = 1, hasSideEffects = 0 in {
let Constraints = "$src0 = $dst" in
def rmk : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
(ins _.RC:$src0, _.KRCWM:$mask, _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst {${mask}}|",
"$dst {${mask}}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_K;
+ [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_K, Sched<[WriteLoad]>;
def rmkz : AVX512PI<0x10, MRMSrcMem, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.ScalarMemOp:$src),
!strconcat(asm, "\t{$src, $dst {${mask}} {z}|",
"$dst {${mask}} {z}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_KZ;
+ [], _.ExeDomain, IIC_SSE_MOV_S_RM>, EVEX, EVEX_KZ, Sched<[WriteLoad]>;
}
def mr: AVX512PI<0x11, MRMDestMem, (outs), (ins _.ScalarMemOp:$dst, _.FRC:$src),
!strconcat(asm, "\t{$src, $dst|$dst, $src}"),
[(store _.FRC:$src, addr:$dst)], _.ExeDomain, IIC_SSE_MOV_S_MR>,
- EVEX;
+ EVEX, Sched<[WriteStore]>;
let mayStore = 1, hasSideEffects = 0 in
def mrk: AVX512PI<0x11, MRMDestMem, (outs),
(ins _.ScalarMemOp:$dst, VK1WM:$mask, _.FRC:$src),
!strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
- [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K;
+ [], _.ExeDomain, IIC_SSE_MOV_S_MR>, EVEX, EVEX_K, Sched<[WriteStore]>;
}
defm VMOVSSZ : avx512_move_scalar<"vmovss", X86Movss, f32x_info>,