[llvm] r192171 - Remove some instructions that existed to provide aliases to the assembler. Can be done with InstAlias instead. Unfortunately, this was causing the printer to use 'vmovq' or 'vmovd' based on what was parsed. To clean up the inconsistencies convert all 'vmovd' with 64-bit registers to 'vmovq', but provide an alias so that 'vmovd' will still parse.

Craig Topper craig.topper at gmail.com
Mon Oct 7 22:53:50 PDT 2013


Author: ctopper
Date: Tue Oct  8 00:53:50 2013
New Revision: 192171

URL: http://llvm.org/viewvc/llvm-project?rev=192171&view=rev
Log:
Remove some instructions that existed to provide aliases to the assembler. Can be done with InstAlias instead. Unfortunately, this was causing the printer to use 'vmovq' or 'vmovd' based on what was parsed. To clean up the inconsistencies convert all 'vmovd' with 64-bit registers to 'vmovq', but provide an alias so that 'vmovd' will still parse.

Modified:
    llvm/trunk/lib/Target/X86/X86InstrSSE.td
    llvm/trunk/test/CodeGen/X86/avx-basic.ll
    llvm/trunk/test/CodeGen/X86/avx-bitcast.ll
    llvm/trunk/test/CodeGen/X86/avx-splat.ll
    llvm/trunk/test/CodeGen/X86/mcinst-avx-lowering.ll
    llvm/trunk/test/MC/Disassembler/X86/x86-64.txt
    llvm/trunk/test/MC/X86/x86-64.s
    llvm/trunk/test/MC/X86/x86_64-avx-encoding.s
    llvm/trunk/utils/TableGen/X86RecognizableInstr.cpp

Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Tue Oct  8 00:53:50 2013
@@ -4381,12 +4381,12 @@ def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem,
                         IIC_SSE_MOVDQ>,
                       VEX, Sched<[WriteLoad]>;
 def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
-                        "mov{d|q}\t{$src, $dst|$dst, $src}",
+                        "movq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                           (v2i64 (scalar_to_vector GR64:$src)))],
                           IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
 def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
-                       "mov{d|q}\t{$src, $dst|$dst, $src}",
+                       "movq\t{$src, $dst|$dst, $src}",
                        [(set FR64:$dst, (bitconvert GR64:$src))],
                        IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
 
@@ -4475,7 +4475,7 @@ def : Pat<(v4i64 (X86Vinsert undef, GR64
 //
 let SchedRW = [WriteMove] in {
 def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
-                          "mov{d|q}\t{$src, $dst|$dst, $src}",
+                          "movq\t{$src, $dst|$dst, $src}",
                           [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                            (iPTR 0)))],
                                                            IIC_SSE_MOVD_ToGP>,
@@ -4497,7 +4497,7 @@ def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem
                         [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
                         VEX, Sched<[WriteLoad]>;
 def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
-                         "mov{d|q}\t{$src, $dst|$dst, $src}",
+                         "movq\t{$src, $dst|$dst, $src}",
                          [(set GR64:$dst, (bitconvert FR64:$src))],
                          IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
 def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
@@ -4549,7 +4549,7 @@ def VMOVZDI2PDIrr : VS2I<0x6E, MRMSrcReg
                                       (v4i32 (scalar_to_vector GR32:$src)))))],
                                       IIC_SSE_MOVDQ>, VEX;
 def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
-                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
+                       "movq\t{$src, $dst|$dst, $src}", // X86-64 only
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                       (v2i64 (scalar_to_vector GR64:$src)))))],
                                       IIC_SSE_MOVDQ>,
@@ -4614,15 +4614,12 @@ let Predicates = [UseSSE2], AddedComplex
 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
-                (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
-                (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
-                (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
-                (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
+// Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
+def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
+                (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
+def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
+                (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
 
 //===---------------------------------------------------------------------===//
 // SSE2 - Move Quadword
@@ -4760,20 +4757,6 @@ let AddedComplexity = 20 in {
   }
 }
 
-// Instructions to match in the assembler
-let SchedRW = [WriteMove] in {
-def VMOVQs64rr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
-                      "movq\t{$src, $dst|$dst, $src}", [],
-                      IIC_SSE_MOVDQ>, VEX, VEX_W;
-def VMOVQd64rr : VS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
-                      "movq\t{$src, $dst|$dst, $src}", [],
-                      IIC_SSE_MOVDQ>, VEX, VEX_W;
-// Recognize "movd" with GR64 destination, but encode as a "movq"
-def VMOVQd64rr_alt : VS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
-                          "movq\t{$src, $dst|$dst, $src}", [],
-                          IIC_SSE_MOVDQ>, VEX, VEX_W;
-} // SchedRW
-
 //===---------------------------------------------------------------------===//
 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
 //===---------------------------------------------------------------------===//

Modified: llvm/trunk/test/CodeGen/X86/avx-basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-basic.ll?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-basic.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-basic.ll Tue Oct  8 00:53:50 2013
@@ -122,10 +122,10 @@ define <16 x i16> @build_vec_16x16(i16 %
   ret <16 x i16> %res
 }
 
-;;; Check that VMOVPQIto64rr generates the assembly string "vmovd".  Previously
+;;; Check that VMOVPQIto64rr generates the assembly string "vmovq".  Previously
 ;;; an incorrect mnemonic of "movd" was printed for this instruction.
 ; CHECK: VMOVPQIto64rr
-; CHECK: vmovd
+; CHECK: vmovq
 define i64 @VMOVPQIto64rr(<2 x i64> %a) {
 entry:
   %vecext.i = extractelement <2 x i64> %a, i32 0

Modified: llvm/trunk/test/CodeGen/X86/avx-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-bitcast.ll?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-bitcast.ll Tue Oct  8 00:53:50 2013
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
 
 ; CHECK: vmovsd (%
-; CHECK-NEXT: vmovd %xmm
+; CHECK-NEXT: vmovq %xmm
 define i64 @bitcasti64tof64() {
   %a = load double* undef
   %b = bitcast double %a to i64

Modified: llvm/trunk/test/CodeGen/X86/avx-splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-splat.ll?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-splat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-splat.ll Tue Oct  8 00:53:50 2013
@@ -20,7 +20,7 @@ entry:
   ret <16 x i16> %shuffle
 }
 
-; CHECK: vmovd
+; CHECK: vmovq
 ; CHECK-NEXT: vmovlhps %xmm
 ; CHECK-NEXT: vinsertf128 $1
 define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {

Modified: llvm/trunk/test/CodeGen/X86/mcinst-avx-lowering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mcinst-avx-lowering.ll?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mcinst-avx-lowering.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mcinst-avx-lowering.ll Tue Oct  8 00:53:50 2013
@@ -4,7 +4,7 @@ define i64 @t1(double %d_ivar) nounwind
 entry:
 ; CHECK: t1
   %0 = bitcast double %d_ivar to i64
-; CHECK: vmovd
+; CHECK: vmovq
 ; CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
   ret i64 %0
 }
@@ -13,7 +13,7 @@ define double @t2(i64 %d_ivar) nounwind
 entry:
 ; CHECK: t2
   %0 = bitcast i64 %d_ivar to double
-; CHECK: vmovd
+; CHECK: vmovq
 ; CHECK: encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
   ret double %0
 }

Modified: llvm/trunk/test/MC/Disassembler/X86/x86-64.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/X86/x86-64.txt?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/test/MC/Disassembler/X86/x86-64.txt (original)
+++ llvm/trunk/test/MC/Disassembler/X86/x86-64.txt Tue Oct  8 00:53:50 2013
@@ -229,3 +229,9 @@
 
 # CHECK: vmovq %xmm0, %xmm0
 0xc5 0xfa 0x7e 0xc0
+
+# CHECK: vmovq %xmm0, %rax
+0xc4 0xe1 0xf9 0x7e 0xc0
+
+# CHECK: movd %xmm0, %rax
+0x66 0x48 0x0f 0x7e 0xc0

Modified: llvm/trunk/test/MC/X86/x86-64.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/x86-64.s?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/x86-64.s (original)
+++ llvm/trunk/test/MC/X86/x86-64.s Tue Oct  8 00:53:50 2013
@@ -1375,3 +1375,16 @@ fsub %st(1)
 fsubr %st(1)
 fdiv %st(1)
 fdivr %st(1)
+
+// CHECK: movd %xmm0, %eax
+// CHECK: movd %xmm0, %rax
+// CHECK: movd %xmm0, %rax
+// CHECK: vmovd %xmm0, %eax
+// CHECK: vmovq %xmm0, %rax
+// CHECK: vmovq %xmm0, %rax
+movd %xmm0, %eax
+movd %xmm0, %rax
+movq %xmm0, %rax
+vmovd %xmm0, %eax
+vmovd %xmm0, %rax
+vmovq %xmm0, %rax

Modified: llvm/trunk/test/MC/X86/x86_64-avx-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/X86/x86_64-avx-encoding.s?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/test/MC/X86/x86_64-avx-encoding.s (original)
+++ llvm/trunk/test/MC/X86/x86_64-avx-encoding.s Tue Oct  8 00:53:50 2013
@@ -2212,11 +2212,11 @@ vdivpd  -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc5,0x79,0x7e,0x30]
           vmovd  %xmm14, (%rax)
 
-// CHECK: vmovd  %rax, %xmm14
+// CHECK: vmovq  %rax, %xmm14
 // CHECK: encoding: [0xc4,0x61,0xf9,0x6e,0xf0]
           vmovd  %rax, %xmm14
 
-// CHECK: vmovd %xmm0, %rax
+// CHECK: vmovq %xmm0, %rax
 // CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
           vmovd %xmm0, %rax
 
@@ -4048,7 +4048,7 @@ vdivpd  -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc4,0xe3,0x79,0x17,0xe1,0x07]
           vextractps   $7, %xmm4, %rcx
 
-// CHECK: vmovd  %xmm4, %rcx
+// CHECK: vmovq  %xmm4, %rcx
 // CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xe1]
           vmovd  %xmm4, %rcx
 

Modified: llvm/trunk/utils/TableGen/X86RecognizableInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/X86RecognizableInstr.cpp?rev=192171&r1=192170&r2=192171&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/X86RecognizableInstr.cpp (original)
+++ llvm/trunk/utils/TableGen/X86RecognizableInstr.cpp Tue Oct  8 00:53:50 2013
@@ -549,9 +549,7 @@ RecognizableInstr::filter_ret Recognizab
       Name == "MMX_MOVD64rrv164"  ||
       Name == "MOV64ri64i32"      ||
       Name == "VMASKMOVDQU64"     ||
-      Name == "VEXTRACTPSrr64"    ||
-      Name == "VMOVQd64rr"        ||
-      Name == "VMOVQs64rr")
+      Name == "VEXTRACTPSrr64")
     return FILTER_WEAK;
 
   // XACQUIRE and XRELEASE reuse REPNE and REP respectively.





More information about the llvm-commits mailing list