[llvm] r256483 - [AVX512] Bring vmovq instruction names into alignment with the AVX and SSE names. Add a missing encoding to disassembler and assembler.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 27 22:11:43 PST 2015
Author: ctopper
Date: Mon Dec 28 00:11:42 2015
New Revision: 256483
URL: http://llvm.org/viewvc/llvm-project?rev=256483&view=rev
Log:
[AVX512] Bring vmovq instruction names into alignment with the AVX and SSE names. Add a missing encoding to disassembler and assembler.
I believe this also fixes a case where a 64-bit memory form that is documented as being unsupported in 32-bit mode was able to be selected there.
Modified:
llvm/trunk/lib/Target/X86/X86InstrAVX512.td
llvm/trunk/test/MC/X86/avx512-encodings.s
Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=256483&r1=256482&r2=256483&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Mon Dec 28 00:11:42 2015
@@ -2840,6 +2840,11 @@ def VMOV64toPQIZrr : AVX512BI<0x6E, MRMS
[(set VR128X:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
+def VMOV64toPQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
+ (ins i64mem:$src),
+ "vmovq\t{$src, $dst|$dst, $src}", []>,
+ EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
let isCodeGenOnly = 1 in {
def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"vmovq\t{$src, $dst|$dst, $src}",
@@ -2849,12 +2854,12 @@ def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDe
"vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))],
IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
-}
def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)],
IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
EVEX_CD8<64, CD8VT1>;
+}
// Move Int Doubleword to Single Scalar
//
@@ -2893,18 +2898,25 @@ def VMOVPQIto64Zrr : I<0x7E, MRMDestReg,
IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
Requires<[HasAVX512, In64BitMode]>;
-def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
- (ins i64mem:$dst, VR128X:$src),
- "vmovq\t{$src, $dst|$dst, $src}",
- [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
- addr:$dst)], IIC_SSE_MOVDQ>,
- EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
- Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
-
-def VMOV64toPQIZrr_REV : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
- (ins VR128X:$src),
- "vmovq.s\t{$src, $dst|$dst, $src}",[]>,
- EVEX, VEX_W, VEX_LIG;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
+def VMOVPQIto64Zmr : I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128X:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [], IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
+ Requires<[HasAVX512, In64BitMode]>;
+
+def VMOVPQI2QIZmr : I<0xD6, MRMDestMem, (outs),
+ (ins i64mem:$dst, VR128X:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
+ addr:$dst)], IIC_SSE_MOVDQ>,
+ EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
+ Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
+
+let hasSideEffects = 0 in
+def VMOVPQI2QIZrr : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
+ (ins VR128X:$src),
+ "vmovq.s\t{$src, $dst|$dst, $src}",[]>,
+ EVEX, VEX_W, VEX_LIG;
// Move Scalar Single to Double Int
//
@@ -2923,12 +2935,12 @@ def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDe
// Move Quadword Int to Packed Quadword Int
//
-def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
+def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
(ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set VR128X:$dst,
(v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
- EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+ EVEX, VEX_W, EVEX_CD8<8, CD8VT8>;
//===----------------------------------------------------------------------===//
// AVX-512 MOVSS, MOVSD
Modified: llvm/trunk/test/MC/X86/avx512-encodings.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/avx512-encodings.s?rev=256483&r1=256482&r2=256483&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/avx512-encodings.s (original)
+++ llvm/trunk/test/MC/X86/avx512-encodings.s Mon Dec 28 00:11:42 2015
@@ -17962,27 +17962,27 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
vmovq %r8, %xmm29
// CHECK: vmovq (%rcx), %xmm29
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x29]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x29]
vmovq (%rcx), %xmm29
// CHECK: vmovq 291(%rax,%r14,8), %xmm29
-// CHECK: encoding: [0x62,0x21,0xfd,0x08,0x6e,0xac,0xf0,0x23,0x01,0x00,0x00]
+// CHECK: encoding: [0x62,0x21,0xfe,0x08,0x7e,0xac,0xf0,0x23,0x01,0x00,0x00]
vmovq 291(%rax,%r14,8), %xmm29
// CHECK: vmovq 1016(%rdx), %xmm29
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x6a,0x7f]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x6a,0x7f]
vmovq 1016(%rdx), %xmm29
// CHECK: vmovq 1024(%rdx), %xmm29
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0xaa,0x00,0x04,0x00,0x00]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0xaa,0x00,0x04,0x00,0x00]
vmovq 1024(%rdx), %xmm29
// CHECK: vmovq -1024(%rdx), %xmm29
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x6a,0x80]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x6a,0x80]
vmovq -1024(%rdx), %xmm29
// CHECK: vmovq -1032(%rdx), %xmm29
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0xaa,0xf8,0xfb,0xff,0xff]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0xaa,0xf8,0xfb,0xff,0xff]
vmovq -1032(%rdx), %xmm29
// CHECK: vmovq %xmm17, (%rcx)
@@ -18014,27 +18014,27 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
vmovq %xmm3, %xmm24
// CHECK: vmovq (%rcx), %xmm24
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x01]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x01]
vmovq (%rcx), %xmm24
// CHECK: vmovq 291(%rax,%r14,8), %xmm24
-// CHECK: encoding: [0x62,0x21,0xfd,0x08,0x6e,0x84,0xf0,0x23,0x01,0x00,0x00]
+// CHECK: encoding: [0x62,0x21,0xfe,0x08,0x7e,0x84,0xf0,0x23,0x01,0x00,0x00]
vmovq 291(%rax,%r14,8), %xmm24
// CHECK: vmovq 1016(%rdx), %xmm24
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x42,0x7f]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x42,0x7f]
vmovq 1016(%rdx), %xmm24
// CHECK: vmovq 1024(%rdx), %xmm24
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x82,0x00,0x04,0x00,0x00]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x82,0x00,0x04,0x00,0x00]
vmovq 1024(%rdx), %xmm24
// CHECK: vmovq -1024(%rdx), %xmm24
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x42,0x80]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x42,0x80]
vmovq -1024(%rdx), %xmm24
// CHECK: vmovq -1032(%rdx), %xmm24
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x82,0xf8,0xfb,0xff,0xff]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x82,0xf8,0xfb,0xff,0xff]
vmovq -1032(%rdx), %xmm24
// CHECK: vmovq %xmm19, (%rcx)
More information about the llvm-commits
mailing list