[llvm] r294437 - [AArch64][TableGen] Skip tied result operands for InstAlias

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 8 03:28:09 PST 2017


Author: aemerson
Date: Wed Feb  8 05:28:08 2017
New Revision: 294437

URL: http://llvm.org/viewvc/llvm-project?rev=294437&view=rev
Log:
[AArch64][TableGen] Skip tied result operands for InstAlias

When generating the printing method, this patch checks the
number of operands in the resulting instruction instead of
just the alias, and skips over tied operands.
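
As a rough illustration (a sketch with my own operand names, not the
exact AArch64 definitions): the INS instructions carry a tied-register
constraint such as "$Rd = $dst", so the result instruction of an alias
like the one below has one more operand than its asm string mentions.
The generated printer now walks the result instruction's operands and
skips the tied one when substituting the '$' placeholders.

    // Sketch only: a mov-style alias for an element insert from a GPR.
    // The instruction's tied source register never appears in the asm
    // string, so the printer has to skip it.
    def : InstAlias<"mov\t$dst.s$idx, $Rn",
                    (INSvi32gpr V128:$dst, VectorIndexS:$idx, GPR32:$Rn)>;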

This allows us to generate the preferred assembly syntax
for the AArch64 'ins' instruction, which the ARM Architecture
Reference Manual (ARM ARM) says should always be displayed
as 'mov'.

Several unit tests have changed as a result, but only to
reflect the preferred disassembly.

Some other InstAlias patterns (movk/bic/orr) needed a
slight adjustment to stop them from becoming the default
printed form and breaking other unit tests.
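
For context (my paraphrase of include/llvm/Target/Target.td, not part
of this patch): the trailing integer on an InstAlias is its Emit flag,
which defaults to 1. Passing 0 leaves the alias usable by the assembler
but tells the generated printer never to select it, which is how these
patterns are kept from becoming the default output form.

    // Roughly: class InstAlias<string Asm, dag Result, int Emit = 1>;
    // With Emit = 0 the alias below is still parsed, but never printed.
    def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0), 0>;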

Patch by Graham Hunter.

Differential Revision: https://reviews.llvm.org/D29219

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll
    llvm/trunk/test/CodeGen/AArch64/bitreverse.ll
    llvm/trunk/test/CodeGen/AArch64/concat_vector-scalar-combine.ll
    llvm/trunk/test/CodeGen/AArch64/fp16-v16-instructions.ll
    llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll
    llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll
    llvm/trunk/test/CodeGen/AArch64/fp16-vector-shuffle.ll
    llvm/trunk/test/CodeGen/AArch64/vector-fcopysign.ll
    llvm/trunk/test/MC/AArch64/arm64-advsimd.s
    llvm/trunk/test/MC/Disassembler/AArch64/arm64-advsimd.txt
    llvm/trunk/utils/TableGen/AsmWriterEmitter.cpp

Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td Wed Feb  8 05:28:08 2017
@@ -452,8 +452,8 @@ let PostEncoderMethod = "fixMOVZ" in
 defm MOVZ : MoveImmediate<0b10, "movz">;
 
 // First group of aliases covers an implicit "lsl #0".
-def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0)>;
-def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0)>;
+def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0), 0>;
+def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0), 0>;
 def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
 def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
 def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
@@ -470,10 +470,10 @@ def : InstAlias<"movn $Rd, $sym", (MOVNX
 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
 
-def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48)>;
-def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32)>;
-def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16)>;
-def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0)>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48), 0>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32), 0>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16), 0>;
+def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0), 0>;
 
 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
@@ -481,8 +481,8 @@ def : InstAlias<"movz $Rd, $sym", (MOVZW
 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
 
-def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16)>;
-def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0)>;
+def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16), 0>;
+def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0), 0>;
 
 // Final group of aliases covers true "mov $Rd, $imm" cases.
 multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
@@ -4396,20 +4396,20 @@ def : InstAlias<"bic $Vd.8h, $imm", (BIC
 def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
 def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
 
-def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
-def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
-def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
-def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
+def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
+def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
 
 def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
 def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
 def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
 def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
 
-def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
-def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
-def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
-def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
+def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
+def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
+def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
+def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
 
 // AdvSIMD FMOV
 def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-AdvSIMD-Scalar.ll Wed Feb  8 05:28:08 2017
@@ -17,7 +17,7 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x
 ; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
 ; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
 ; CHECK-OPT-NOT: fmov
-; CHECK: ins.d v0[1], [[COPY_REG2]]
+; CHECK: mov.d v0[1], [[COPY_REG2]]
 ; CHECK-NEXT: ret
 ;
 ; GENERIC-LABEL: bar:
@@ -29,7 +29,7 @@ define <2 x i64> @bar(<2 x i64> %a, <2 x
 ; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
 ; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
 ; GENERIC-OPT-NOT: fmov
-; GENERIC: ins v0.d[1], [[COPY_REG2]]
+; GENERIC: mov v0.d[1], [[COPY_REG2]]
 ; GENERIC-NEXT: ret
   %add = add <2 x i64> %a, %b
   %vgetq_lane = extractelement <2 x i64> %add, i32 0

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-build-vector.ll Wed Feb  8 05:28:08 2017
@@ -5,7 +5,7 @@
 define void @one_lane(i32* nocapture %out_int, i32 %skip0) nounwind {
 ; CHECK-LABEL: one_lane:
 ; CHECK: dup.16b v[[REG:[0-9]+]], wzr
-; CHECK-NEXT: ins.b v[[REG]][0], w1
+; CHECK-NEXT: mov.b v[[REG]][0], w1
 ; v and q are aliases, and str is preferred against st.16b when possible
 ; rdar://11246289
 ; CHECK: str q[[REG]], [x0]
@@ -23,9 +23,9 @@ define void @one_lane(i32* nocapture %ou
 define <4 x float>  @foo(float %a, float %b, float %c, float %d) nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK-NOT: ins.s v0[0], v0[0]
-; CHECK: ins.s v0[1], v1[0]
-; CHECK: ins.s v0[2], v2[0]
-; CHECK: ins.s v0[3], v3[0]
+; CHECK: mov.s v0[1], v1[0]
+; CHECK: mov.s v0[2], v2[0]
+; CHECK: mov.s v0[3], v3[0]
 ; CHECK: ret
   %1 = insertelement <4 x float> undef, float %a, i32 0
   %2 = insertelement <4 x float> %1, float %b, i32 1

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll Wed Feb  8 05:28:08 2017
@@ -261,7 +261,7 @@ entry:
 define <2 x i32> @f(i32 %a, i32 %b) nounwind readnone  {
 ; CHECK-LABEL: f:
 ; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: ins.s v0[1], w1
+; CHECK-NEXT: mov.s v0[1], w1
 ; CHECK-NEXT: ret
   %vecinit = insertelement <2 x i32> undef, i32 %a, i32 0
   %vecinit1 = insertelement <2 x i32> %vecinit, i32 %b, i32 1
@@ -271,9 +271,9 @@ define <2 x i32> @f(i32 %a, i32 %b) noun
 define <4 x i32> @g(i32 %a, i32 %b) nounwind readnone  {
 ; CHECK-LABEL: g:
 ; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: ins.s v0[1], w1
-; CHECK-NEXT: ins.s v0[2], w1
-; CHECK-NEXT: ins.s v0[3], w0
+; CHECK-NEXT: mov.s v0[1], w1
+; CHECK-NEXT: mov.s v0[2], w1
+; CHECK-NEXT: mov.s v0[3], w0
 ; CHECK-NEXT: ret
   %vecinit = insertelement <4 x i32> undef, i32 %a, i32 0
   %vecinit1 = insertelement <4 x i32> %vecinit, i32 %b, i32 1
@@ -285,7 +285,7 @@ define <4 x i32> @g(i32 %a, i32 %b) noun
 define <2 x i64> @h(i64 %a, i64 %b) nounwind readnone  {
 ; CHECK-LABEL: h:
 ; CHECK-NEXT: fmov d0, x0
-; CHECK-NEXT: ins.d v0[1], x1
+; CHECK-NEXT: mov.d v0[1], x1
 ; CHECK-NEXT: ret
   %vecinit = insertelement <2 x i64> undef, i64 %a, i32 0
   %vecinit1 = insertelement <2 x i64> %vecinit, i64 %b, i32 1

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll Wed Feb  8 05:28:08 2017
@@ -6180,7 +6180,7 @@ define <4 x float> @test_v4f32_post_reg_
 ; CHECK-NEXT: ldr s[[LD:[0-9]+]], [x0]
 ; CHECK-NEXT: str q0, [x3]
 ; CHECK-NEXT: ldr q0, [x4]
-; CHECK-NEXT: ins.s v0[1], v[[LD]][0]
+; CHECK-NEXT: mov.s v0[1], v[[LD]][0]
 ; CHECK-NEXT: add [[POST:x[0-9]]], x0, x2, lsl #2
 ; CHECK-NEXT: str [[POST]], [x1]
 ; CHECK-NEXT: ret

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-neon-copy.ll Wed Feb  8 05:28:08 2017
@@ -3,56 +3,56 @@
 
 define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
 ; CHECK-LABEL: ins16bw:
-; CHECK: ins {{v[0-9]+}}.b[15], {{w[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.b[15], {{w[0-9]+}}
   %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 15
   ret <16 x i8> %tmp3
 }
 
 define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
 ; CHECK-LABEL: ins8hw:
-; CHECK: ins {{v[0-9]+}}.h[6], {{w[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.h[6], {{w[0-9]+}}
   %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 6
   ret <8 x i16> %tmp3
 }
 
 define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
 ; CHECK-LABEL: ins4sw:
-; CHECK: ins {{v[0-9]+}}.s[2], {{w[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.s[2], {{w[0-9]+}}
   %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 2
   ret <4 x i32> %tmp3
 }
 
 define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
 ; CHECK-LABEL: ins2dw:
-; CHECK: ins {{v[0-9]+}}.d[1], {{x[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.d[1], {{x[0-9]+}}
   %tmp3 = insertelement <2 x i64> %tmp1, i64 %tmp2, i32 1
   ret <2 x i64> %tmp3
 }
 
 define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
 ; CHECK-LABEL: ins8bw:
-; CHECK: ins {{v[0-9]+}}.b[5], {{w[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.b[5], {{w[0-9]+}}
   %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 5
   ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
 ; CHECK-LABEL: ins4hw:
-; CHECK: ins {{v[0-9]+}}.h[3], {{w[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.h[3], {{w[0-9]+}}
   %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 3
   ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
 ; CHECK-LABEL: ins2sw:
-; CHECK: ins {{v[0-9]+}}.s[1], {{w[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.s[1], {{w[0-9]+}}
   %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
   ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
 ; CHECK-LABEL: ins16b16:
-; CHECK: ins {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
+; CHECK: mov {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
   %tmp3 = extractelement <16 x i8> %tmp1, i32 2
   %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
   ret <16 x i8> %tmp4
@@ -60,7 +60,7 @@ define <16 x i8> @ins16b16(<16 x i8> %tm
 
 define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
 ; CHECK-LABEL: ins8h8:
-; CHECK: ins {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
+; CHECK: mov {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
   ret <8 x i16> %tmp4
@@ -68,7 +68,7 @@ define <8 x i16> @ins8h8(<8 x i16> %tmp1
 
 define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
 ; CHECK-LABEL: ins4s4:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
   ret <4 x i32> %tmp4
@@ -76,7 +76,7 @@ define <4 x i32> @ins4s4(<4 x i32> %tmp1
 
 define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
 ; CHECK-LABEL: ins2d2:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
   %tmp3 = extractelement <2 x i64> %tmp1, i32 0
   %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
   ret <2 x i64> %tmp4
@@ -84,7 +84,7 @@ define <2 x i64> @ins2d2(<2 x i64> %tmp1
 
 define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
 ; CHECK-LABEL: ins4f4:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
   %tmp3 = extractelement <4 x float> %tmp1, i32 2
   %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
   ret <4 x float> %tmp4
@@ -92,7 +92,7 @@ define <4 x float> @ins4f4(<4 x float> %
 
 define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
 ; CHECK-LABEL: ins2df2:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
   %tmp3 = extractelement <2 x double> %tmp1, i32 0
   %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
   ret <2 x double> %tmp4
@@ -100,7 +100,7 @@ define <2 x double> @ins2df2(<2 x double
 
 define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
 ; CHECK-LABEL: ins8b16:
-; CHECK: ins {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
+; CHECK: mov {{v[0-9]+}}.b[15], {{v[0-9]+}}.b[2]
   %tmp3 = extractelement <8 x i8> %tmp1, i32 2
   %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
   ret <16 x i8> %tmp4
@@ -108,7 +108,7 @@ define <16 x i8> @ins8b16(<8 x i8> %tmp1
 
 define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
 ; CHECK-LABEL: ins4h8:
-; CHECK: ins {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
+; CHECK: mov {{v[0-9]+}}.h[7], {{v[0-9]+}}.h[2]
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
   ret <8 x i16> %tmp4
@@ -116,7 +116,7 @@ define <8 x i16> @ins4h8(<4 x i16> %tmp1
 
 define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
 ; CHECK-LABEL: ins2s4:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
   %tmp3 = extractelement <2 x i32> %tmp1, i32 1
   %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
   ret <4 x i32> %tmp4
@@ -124,7 +124,7 @@ define <4 x i32> @ins2s4(<2 x i32> %tmp1
 
 define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
 ; CHECK-LABEL: ins1d2:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
   ret <2 x i64> %tmp4
@@ -132,7 +132,7 @@ define <2 x i64> @ins1d2(<1 x i64> %tmp1
 
 define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
 ; CHECK-LABEL: ins2f4:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[1]
   %tmp3 = extractelement <2 x float> %tmp1, i32 1
   %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
   ret <4 x float> %tmp4
@@ -140,7 +140,7 @@ define <4 x float> @ins2f4(<2 x float> %
 
 define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
 ; CHECK-LABEL: ins1f2:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
   %tmp3 = extractelement <1 x double> %tmp1, i32 0
   %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
   ret <2 x double> %tmp4
@@ -148,7 +148,7 @@ define <2 x double> @ins1f2(<1 x double>
 
 define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
 ; CHECK-LABEL: ins16b8:
-; CHECK: ins {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[2]
+; CHECK: mov {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[2]
   %tmp3 = extractelement <16 x i8> %tmp1, i32 2
   %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
   ret <8 x i8> %tmp4
@@ -156,7 +156,7 @@ define <8 x i8> @ins16b8(<16 x i8> %tmp1
 
 define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
 ; CHECK-LABEL: ins8h4:
-; CHECK: ins {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
+; CHECK: mov {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
   ret <4 x i16> %tmp4
@@ -164,7 +164,7 @@ define <4 x i16> @ins8h4(<8 x i16> %tmp1
 
 define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
 ; CHECK-LABEL: ins4s2:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
   ret <2 x i32> %tmp4
@@ -172,7 +172,7 @@ define <2 x i32> @ins4s2(<4 x i32> %tmp1
 
 define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
 ; CHECK-LABEL: ins2d1:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
   %tmp3 = extractelement <2 x i64> %tmp1, i32 0
   %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
   ret <1 x i64> %tmp4
@@ -180,7 +180,7 @@ define <1 x i64> @ins2d1(<2 x i64> %tmp1
 
 define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
 ; CHECK-LABEL: ins4f2:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[2]
   %tmp3 = extractelement <4 x float> %tmp1, i32 2
   %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
   ret <2 x float> %tmp4
@@ -196,7 +196,7 @@ define <1 x double> @ins2f1(<2 x double>
 
 define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
 ; CHECK-LABEL: ins8b8:
-; CHECK: ins {{v[0-9]+}}.b[4], {{v[0-9]+}}.b[2]
+; CHECK: mov {{v[0-9]+}}.b[4], {{v[0-9]+}}.b[2]
   %tmp3 = extractelement <8 x i8> %tmp1, i32 2
   %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
   ret <8 x i8> %tmp4
@@ -204,7 +204,7 @@ define <8 x i8> @ins8b8(<8 x i8> %tmp1,
 
 define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
 ; CHECK-LABEL: ins4h4:
-; CHECK: ins {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
+; CHECK: mov {{v[0-9]+}}.h[3], {{v[0-9]+}}.h[2]
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
   ret <4 x i16> %tmp4
@@ -212,7 +212,7 @@ define <4 x i16> @ins4h4(<4 x i16> %tmp1
 
 define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
 ; CHECK-LABEL: ins2s2:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
   %tmp3 = extractelement <2 x i32> %tmp1, i32 0
   %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
   ret <2 x i32> %tmp4
@@ -220,7 +220,7 @@ define <2 x i32> @ins2s2(<2 x i32> %tmp1
 
 define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
 ; CHECK-LABEL: ins1d1:
-; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[0]
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
   ret <1 x i64> %tmp4
@@ -228,7 +228,7 @@ define <1 x i64> @ins1d1(<1 x i64> %tmp1
 
 define <2 x float> @ins2f2(<2 x float> %tmp1, <2 x float> %tmp2) {
 ; CHECK-LABEL: ins2f2:
-; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: mov {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
   %tmp3 = extractelement <2 x float> %tmp1, i32 0
   %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
   ret <2 x float> %tmp4
@@ -388,28 +388,28 @@ define i64 @smovx2s(<2 x i32> %tmp1) {
 
 define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
 ; CHECK-LABEL: test_vcopy_lane_s8:
-; CHECK: ins  {{v[0-9]+}}.b[5], {{v[0-9]+}}.b[3]
+; CHECK: mov  {{v[0-9]+}}.b[5], {{v[0-9]+}}.b[3]
   %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 11, i32 6, i32 7>
   ret <8 x i8> %vset_lane
 }
 
 define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
 ; CHECK-LABEL: test_vcopyq_laneq_s8:
-; CHECK: ins  {{v[0-9]+}}.b[14], {{v[0-9]+}}.b[6]
+; CHECK: mov  {{v[0-9]+}}.b[14], {{v[0-9]+}}.b[6]
   %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 22, i32 15>
   ret <16 x i8> %vset_lane
 }
 
 define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
 ; CHECK-LABEL: test_vcopy_lane_swap_s8:
-; CHECK: ins {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[0]
+; CHECK: mov {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[0]
   %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 0>
   ret <8 x i8> %vset_lane
 }
 
 define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
 ; CHECK-LABEL: test_vcopyq_laneq_swap_s8:
-; CHECK: ins {{v[0-9]+}}.b[0], {{v[0-9]+}}.b[15]
+; CHECK: mov {{v[0-9]+}}.b[0], {{v[0-9]+}}.b[15]
   %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   ret <16 x i8> %vset_lane
 }
@@ -907,9 +907,9 @@ define <8 x i8> @getl(<16 x i8> %x) #0 {
 ; CHECK-DAG: and [[MASKED_IDX:x[0-9]+]], x0, #0x7
 ; CHECK: bfi [[PTR:x[0-9]+]], [[MASKED_IDX]], #1, #3
 ; CHECK-DAG: ld1 { v[[R:[0-9]+]].h }[0], {{\[}}[[PTR]]{{\]}}
-; CHECK-DAG: ins v[[R]].h[1], v0.h[1]
-; CHECK-DAG: ins v[[R]].h[2], v0.h[2]
-; CHECK-DAG: ins v[[R]].h[3], v0.h[3]
+; CHECK-DAG: mov v[[R]].h[1], v0.h[1]
+; CHECK-DAG: mov v[[R]].h[2], v0.h[2]
+; CHECK-DAG: mov v[[R]].h[3], v0.h[3]
 define <4 x i16> @test_extracts_inserts_varidx_extract(<8 x i16> %x, i32 %idx) {
   %tmp = extractelement <8 x i16> %x, i32 %idx
   %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 0
@@ -927,9 +927,9 @@ define <4 x i16> @test_extracts_inserts_
 ; CHECK: bfi x9, [[MASKED_IDX]], #1, #2
 ; CHECK: st1 { v0.h }[0], [x9]
 ; CHECK-DAG: ldr d[[R:[0-9]+]]
-; CHECK-DAG: ins v[[R]].h[1], v0.h[1]
-; CHECK-DAG: ins v[[R]].h[2], v0.h[2]
-; CHECK-DAG: ins v[[R]].h[3], v0.h[3]
+; CHECK-DAG: mov v[[R]].h[1], v0.h[1]
+; CHECK-DAG: mov v[[R]].h[2], v0.h[2]
+; CHECK-DAG: mov v[[R]].h[3], v0.h[3]
 define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
   %tmp = extractelement <8 x i16> %x, i32 0
   %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 %idx
@@ -1125,7 +1125,7 @@ define <2 x i32> @test_concat_diff_v1i32
 ; CHECK-LABEL: test_concat_diff_v1i32_v1i32:
 ; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
 ; CHECK: sqabs s{{[0-9]+}}, s{{[0-9]+}}
-; CHECK: ins {{v[0-9]+}}.s[1], w{{[0-9]+}}
+; CHECK: mov {{v[0-9]+}}.s[1], w{{[0-9]+}}
 entry:
   %c = tail call i32 @llvm.aarch64.neon.sqabs.i32(i32 %a)
   %d = insertelement <2 x i32> undef, i32 %c, i32 0
@@ -1137,7 +1137,7 @@ entry:
 
 define <16 x i8> @test_concat_v16i8_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v16i8_v16i8:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecinit30 = shufflevector <16 x i8> %x, <16 x i8> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
   ret <16 x i8> %vecinit30
@@ -1145,7 +1145,7 @@ entry:
 
 define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v8i8_v16i8:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <8 x i8> %x, i32 0
   %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
@@ -1169,7 +1169,7 @@ entry:
 
 define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v16i8_v8i8:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <16 x i8> %x, i32 0
   %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
@@ -1208,7 +1208,7 @@ entry:
 
 define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v8i8_v8i8:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <8 x i8> %x, i32 0
   %vecinit = insertelement <16 x i8> undef, i8 %vecext, i32 0
@@ -1247,7 +1247,7 @@ entry:
 
 define <8 x i16> @test_concat_v8i16_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v8i16_v8i16:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecinit14 = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
   ret <8 x i16> %vecinit14
@@ -1255,7 +1255,7 @@ entry:
 
 define <8 x i16> @test_concat_v8i16_v4i16_v8i16(<4 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v4i16_v8i16:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <4 x i16> %x, i32 0
   %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
@@ -1271,7 +1271,7 @@ entry:
 
 define <8 x i16> @test_concat_v8i16_v8i16_v4i16(<8 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v8i16_v4i16:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <8 x i16> %x, i32 0
   %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
@@ -1294,7 +1294,7 @@ entry:
 
 define <8 x i16> @test_concat_v8i16_v4i16_v4i16(<4 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v4i16_v4i16:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <4 x i16> %x, i32 0
   %vecinit = insertelement <8 x i16> undef, i16 %vecext, i32 0
@@ -1317,7 +1317,7 @@ entry:
 
 define <4 x i32> @test_concat_v4i32_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v4i32_v4i32:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecinit6 = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   ret <4 x i32> %vecinit6
@@ -1325,7 +1325,7 @@ entry:
 
 define <4 x i32> @test_concat_v4i32_v2i32_v4i32(<2 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v2i32_v4i32:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <2 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -1337,7 +1337,7 @@ entry:
 
 define <4 x i32> @test_concat_v4i32_v4i32_v2i32(<4 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v4i32_v2i32:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <4 x i32> %x, i32 0
   %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -1352,7 +1352,7 @@ entry:
 
 define <4 x i32> @test_concat_v4i32_v2i32_v2i32(<2 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v2i32_v2i32:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecinit6 = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %vecinit6
@@ -1378,7 +1378,7 @@ entry:
 
 define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v2i64_v1i64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <2 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
@@ -1389,7 +1389,7 @@ entry:
 
 define <2 x i64> @test_concat_v2i64_v1i64_v1i64(<1 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v1i64_v1i64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: mov {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
 entry:
   %vecext = extractelement <1 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-smaxv.ll Wed Feb  8 05:28:08 2017
@@ -68,7 +68,7 @@ entry:
 define <8 x i8> @test_vmaxv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
 ; CHECK-LABEL: test_vmaxv_s8_used_by_laneop:
 ; CHECK: smaxv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a2)
@@ -80,7 +80,7 @@ entry:
 define <4 x i16> @test_vmaxv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
 ; CHECK-LABEL: test_vmaxv_s16_used_by_laneop:
 ; CHECK: smaxv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a2)
@@ -92,7 +92,7 @@ entry:
 define <2 x i32> @test_vmaxv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
 ; CHECK-LABEL: test_vmaxv_s32_used_by_laneop:
 ; CHECK: smaxp.2s v[[REGNUM:[0-9]+]], v1, v1
-; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> %a2)
@@ -103,7 +103,7 @@ entry:
 define <16 x i8> @test_vmaxvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
 ; CHECK-LABEL: test_vmaxvq_s8_used_by_laneop:
 ; CHECK: smaxv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a2)
@@ -115,7 +115,7 @@ entry:
 define <8 x i16> @test_vmaxvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
 ; CHECK-LABEL: test_vmaxvq_s16_used_by_laneop:
 ; CHECK: smaxv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a2)
@@ -127,7 +127,7 @@ entry:
 define <4 x i32> @test_vmaxvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
 ; CHECK-LABEL: test_vmaxvq_s32_used_by_laneop:
 ; CHECK: smaxv.4s s[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a2)

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-sminv.ll Wed Feb  8 05:28:08 2017
@@ -68,7 +68,7 @@ entry:
 define <8 x i8> @test_vminv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
 ; CHECK-LABEL: test_vminv_s8_used_by_laneop:
 ; CHECK: sminv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a2)
@@ -80,7 +80,7 @@ entry:
 define <4 x i16> @test_vminv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
 ; CHECK-LABEL: test_vminv_s16_used_by_laneop:
 ; CHECK: sminv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a2)
@@ -92,7 +92,7 @@ entry:
 define <2 x i32> @test_vminv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
 ; CHECK-LABEL: test_vminv_s32_used_by_laneop:
 ; CHECK: sminp.2s v[[REGNUM:[0-9]+]], v1, v1
-; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> %a2)
@@ -103,7 +103,7 @@ entry:
 define <16 x i8> @test_vminvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
 ; CHECK-LABEL: test_vminvq_s8_used_by_laneop:
 ; CHECK: sminv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a2)
@@ -115,7 +115,7 @@ entry:
 define <8 x i16> @test_vminvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
 ; CHECK-LABEL: test_vminvq_s16_used_by_laneop:
 ; CHECK: sminv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a2)
@@ -127,7 +127,7 @@ entry:
 define <4 x i32> @test_vminvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
 ; CHECK-LABEL: test_vminvq_s32_used_by_laneop:
 ; CHECK: sminv.4s s[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a2)

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-stp.ll Wed Feb  8 05:28:08 2017
@@ -106,9 +106,9 @@ entry:
 ; CHECK-LABEL: nosplat_v4i32:
 ; CHECK: str w0,
 ; CHECK: ldr q[[REG1:[0-9]+]],
-; CHECK-DAG: ins v[[REG1]].s[1], w0
-; CHECK-DAG: ins v[[REG1]].s[2], w0
-; CHECK-DAG: ins v[[REG1]].s[3], w0
+; CHECK-DAG: mov v[[REG1]].s[1], w0
+; CHECK-DAG: mov v[[REG1]].s[2], w0
+; CHECK-DAG: mov v[[REG1]].s[3], w0
 ; CHECK: ext v[[REG2:[0-9]+]].16b, v[[REG1]].16b, v[[REG1]].16b, #8
 ; CHECK: stp d[[REG1]], d[[REG2]], [x1]
 ; CHECK: ret
@@ -128,9 +128,9 @@ define void @nosplat2_v4i32(i32 %v, i32
 entry:
 
 ; CHECK-LABEL: nosplat2_v4i32:
-; CHECK: ins v[[REG1]].s[1], w0
-; CHECK-DAG: ins v[[REG1]].s[2], w0
-; CHECK-DAG: ins v[[REG1]].s[3], w0
+; CHECK: mov v[[REG1]].s[1], w0
+; CHECK-DAG: mov v[[REG1]].s[2], w0
+; CHECK-DAG: mov v[[REG1]].s[3], w0
 ; CHECK: ext v[[REG2:[0-9]+]].16b, v[[REG1]].16b, v[[REG1]].16b, #8
 ; CHECK: stp d[[REG1]], d[[REG2]], [x1]
 ; CHECK: ret

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-umaxv.ll Wed Feb  8 05:28:08 2017
@@ -89,7 +89,7 @@ return:
 define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
 ; CHECK-LABEL: test_vmaxv_u8_used_by_laneop:
 ; CHECK: umaxv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a2)
@@ -101,7 +101,7 @@ entry:
 define <4 x i16> @test_vmaxv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
 ; CHECK-LABEL: test_vmaxv_u16_used_by_laneop:
 ; CHECK: umaxv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a2)
@@ -113,7 +113,7 @@ entry:
 define <2 x i32> @test_vmaxv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
 ; CHECK-LABEL: test_vmaxv_u32_used_by_laneop:
 ; CHECK: umaxp.2s v[[REGNUM:[0-9]+]], v1, v1
-; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> %a2)
@@ -124,7 +124,7 @@ entry:
 define <16 x i8> @test_vmaxvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
 ; CHECK-LABEL: test_vmaxvq_u8_used_by_laneop:
 ; CHECK: umaxv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a2)
@@ -136,7 +136,7 @@ entry:
 define <8 x i16> @test_vmaxvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
 ; CHECK-LABEL: test_vmaxvq_u16_used_by_laneop:
 ; CHECK: umaxv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a2)
@@ -148,7 +148,7 @@ entry:
 define <4 x i32> @test_vmaxvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
 ; CHECK-LABEL: test_vmaxvq_u32_used_by_laneop:
 ; CHECK: umaxv.4s s[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a2)

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-uminv.ll Wed Feb  8 05:28:08 2017
@@ -89,7 +89,7 @@ return:
 define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
 ; CHECK-LABEL: test_vminv_u8_used_by_laneop:
 ; CHECK: uminv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a2)
@@ -101,7 +101,7 @@ entry:
 define <4 x i16> @test_vminv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
 ; CHECK-LABEL: test_vminv_u16_used_by_laneop:
 ; CHECK: uminv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a2)
@@ -113,7 +113,7 @@ entry:
 define <2 x i32> @test_vminv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
 ; CHECK-LABEL: test_vminv_u32_used_by_laneop:
 ; CHECK: uminp.2s v[[REGNUM:[0-9]+]], v1, v1
-; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32> %a2)
@@ -124,7 +124,7 @@ entry:
 define <16 x i8> @test_vminvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
 ; CHECK-LABEL: test_vminvq_u8_used_by_laneop:
 ; CHECK: uminv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a2)
@@ -136,7 +136,7 @@ entry:
 define <8 x i16> @test_vminvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
 ; CHECK-LABEL: test_vminvq_u16_used_by_laneop:
 ; CHECK: uminv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a2)
@@ -148,7 +148,7 @@ entry:
 define <4 x i32> @test_vminvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
 ; CHECK-LABEL: test_vminvq_u32_used_by_laneop:
 ; CHECK: uminv.4s s[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a2)

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vaddv.ll Wed Feb  8 05:28:08 2017
@@ -14,7 +14,7 @@ entry:
 define <8 x i8> @test_vaddv_s8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
 ; CHECK-LABEL: test_vaddv_s8_used_by_laneop:
 ; CHECK: addv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a2)
@@ -37,7 +37,7 @@ entry:
 define <4 x i16> @test_vaddv_s16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
 ; CHECK-LABEL: test_vaddv_s16_used_by_laneop:
 ; CHECK: addv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a2)
@@ -60,7 +60,7 @@ entry:
 define <2 x i32> @test_vaddv_s32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
 ; CHECK-LABEL: test_vaddv_s32_used_by_laneop:
 ; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
-; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> %a2)
@@ -81,7 +81,7 @@ entry:
 define <2 x i64> @test_vaddv_s64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) {
 ; CHECK-LABEL: test_vaddv_s64_used_by_laneop:
 ; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.d v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.d v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> %a2)
@@ -103,7 +103,7 @@ entry:
 define <8 x i8> @test_vaddv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
 ; CHECK-LABEL: test_vaddv_u8_used_by_laneop:
 ; CHECK: addv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a2)
@@ -137,7 +137,7 @@ entry:
 define <4 x i16> @test_vaddv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
 ; CHECK-LABEL: test_vaddv_u16_used_by_laneop:
 ; CHECK: addv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a2)
@@ -171,7 +171,7 @@ entry:
 define <2 x i32> @test_vaddv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
 ; CHECK-LABEL: test_vaddv_u32_used_by_laneop:
 ; CHECK: addp.2s v[[REGNUM:[0-9]+]], v1, v1
-; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> %a2)
@@ -220,7 +220,7 @@ entry:
 define <2 x i64> @test_vaddv_u64_used_by_laneop(<2 x i64> %a1, <2 x i64> %a2) {
 ; CHECK-LABEL: test_vaddv_u64_used_by_laneop:
 ; CHECK: addp.2d d[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.d v0[1], v[[REGNUM]][0]
+; CHECK-NEXT: mov.d v0[1], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> %a2)
@@ -254,7 +254,7 @@ entry:
 define <16 x i8> @test_vaddvq_s8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
 ; CHECK-LABEL: test_vaddvq_s8_used_by_laneop:
 ; CHECK: addv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a2)
@@ -277,7 +277,7 @@ entry:
 define <8 x i16> @test_vaddvq_s16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
 ; CHECK-LABEL: test_vaddvq_s16_used_by_laneop:
 ; CHECK: addv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a2)
@@ -299,7 +299,7 @@ entry:
 define <4 x i32> @test_vaddvq_s32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
 ; CHECK-LABEL: test_vaddvq_s32_used_by_laneop:
 ; CHECK: addv.4s s[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a2)
@@ -321,7 +321,7 @@ entry:
 define <16 x i8> @test_vaddvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
 ; CHECK-LABEL: test_vaddvq_u8_used_by_laneop:
 ; CHECK: addv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a2)
@@ -344,7 +344,7 @@ entry:
 define <8 x i16> @test_vaddvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
 ; CHECK-LABEL: test_vaddvq_u16_used_by_laneop:
 ; CHECK: addv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a2)
@@ -366,7 +366,7 @@ entry:
 define <4 x i32> @test_vaddvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
 ; CHECK-LABEL: test_vaddvq_u32_used_by_laneop:
 ; CHECK: addv.4s s[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0]
+; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
 ; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a2)

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcombine.ll Wed Feb  8 05:28:08 2017
@@ -6,7 +6,7 @@
 define <16 x i8> @test(<16 x i8> %q0, <16 x i8> %q1, i8* nocapture %dest) nounwind {
 entry:
 ; CHECK-LABEL: test:
-; CHECK: ins.d v0[1], v1[0]
+; CHECK: mov.d v0[1], v1[0]
   %0 = bitcast <16 x i8> %q0 to <2 x i64>
   %shuffle.i = shufflevector <2 x i64> %0, <2 x i64> undef, <1 x i32> zeroinitializer
   %1 = bitcast <16 x i8> %q1 to <2 x i64>

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-insertion.ll Wed Feb  8 05:28:08 2017
@@ -9,7 +9,7 @@ entry:
 
   ; CHECK-LABEL: test0f
   ; CHECK: movi.2d v[[TEMP:[0-9]+]], #0000000000000000
-  ; CHECK: ins.s v[[TEMP]][0], v{{[0-9]+}}[0]
+  ; CHECK: mov.s v[[TEMP]][0], v{{[0-9]+}}[0]
   ; CHECK: str q[[TEMP]], [x0]
   ; CHECK: ret
 
@@ -27,7 +27,7 @@ entry:
   ; CHECK-LABEL: test1f
   ; CHECK: fmov  s[[TEMP:[0-9]+]], #1.0000000
   ; CHECK: dup.4s  v[[TEMP2:[0-9]+]], v[[TEMP]][0]
-  ; CHECK: ins.s v[[TEMP2]][0], v0[0]
+  ; CHECK: mov.s v[[TEMP2]][0], v0[0]
   ; CHECK: str q[[TEMP2]], [x0]
   ; CHECK: ret
 }

Modified: llvm/trunk/test/CodeGen/AArch64/bitreverse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/bitreverse.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/bitreverse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/bitreverse.ll Wed Feb  8 05:28:08 2017
@@ -11,7 +11,7 @@ define <2 x i16> @f(<2 x i16> %a) {
 ; CHECK-DAG: fmov s0, [[REG2]]
 ; CHECK-DAG: mov [[REG3:w[0-9]+]], v0.s[1]
 ; CHECK-DAG: rbit [[REG4:w[0-9]+]], [[REG3]]
-; CHECK-DAG: ins v0.s[1], [[REG4]]
+; CHECK-DAG: mov v0.s[1], [[REG4]]
 ; CHECK-DAG: ushr v0.2s, v0.2s, #16
   %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
   ret <2 x i16> %b

Modified: llvm/trunk/test/CodeGen/AArch64/concat_vector-scalar-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/concat_vector-scalar-combine.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/concat_vector-scalar-combine.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/concat_vector-scalar-combine.ll Wed Feb  8 05:28:08 2017
@@ -38,9 +38,9 @@ entry:
 define <8 x i8> @test_concat_scalars_2x_v2i8_to_v8i8(i32 %x, i32 %y) #0 {
 entry:
 ; CHECK-LABEL: test_concat_scalars_2x_v2i8_to_v8i8:
-; CHECK-NEXT: ins.h v0[0], w0
-; CHECK-NEXT: ins.h v0[1], w1
-; CHECK-NEXT: ins.h v0[3], w1
+; CHECK-NEXT: mov.h v0[0], w0
+; CHECK-NEXT: mov.h v0[1], w1
+; CHECK-NEXT: mov.h v0[3], w1
 ; CHECK-NEXT: ret
   %tx = trunc i32 %x to i16
   %ty = trunc i32 %y to i16
@@ -54,7 +54,7 @@ define <8 x i8> @test_concat_scalars_2x_
 entry:
 ; CHECK-LABEL: test_concat_scalars_2x_v4i8_to_v8i8_dup:
 ; CHECK-NEXT: fmov s0, w1
-; CHECK-NEXT: ins.s v0[1], w0
+; CHECK-NEXT: mov.s v0[1], w0
 ; CHECK-NEXT: ret
   %bx = bitcast i32 %x to <4 x i8>
   %by = bitcast i32 %y to <4 x i8>
@@ -66,9 +66,9 @@ define <8 x i16> @test_concat_scalars_2x
 entry:
 ; CHECK-LABEL: test_concat_scalars_2x_v2i16_to_v8i16_dup:
 ; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: ins.s v0[1], w1
-; CHECK-NEXT: ins.s v0[2], w1
-; CHECK-NEXT: ins.s v0[3], w0
+; CHECK-NEXT: mov.s v0[1], w1
+; CHECK-NEXT: mov.s v0[2], w1
+; CHECK-NEXT: mov.s v0[3], w0
 ; CHECK-NEXT: ret
   %bx = bitcast i32 %x to <2 x i16>
   %by = bitcast i32 %y to <2 x i16>
@@ -84,10 +84,10 @@ define <8 x i8> @test_concat_scalars_mix
 entry:
 ; CHECK-LABEL: test_concat_scalars_mixed_2x_v2i8_to_v8i8:
 ; CHECK-NEXT: fmov s[[X:[0-9]+]], w0
-; CHECK-NEXT: ins.h v0[0], v[[X]][0]
-; CHECK-NEXT: ins.h v0[1], v1[0]
-; CHECK-NEXT: ins.h v0[2], v[[X]][0]
-; CHECK-NEXT: ins.h v0[3], v1[0]
+; CHECK-NEXT: mov.h v0[0], v[[X]][0]
+; CHECK-NEXT: mov.h v0[1], v1[0]
+; CHECK-NEXT: mov.h v0[2], v[[X]][0]
+; CHECK-NEXT: mov.h v0[3], v1[0]
 ; CHECK-NEXT: ret
   %t = trunc i32 %x to i16
   %0 = bitcast i16 %t to <2 x i8>
@@ -99,10 +99,10 @@ entry:
 define <2 x float> @test_concat_scalars_fp_2x_v2i8_to_v8i8(float %dummy, half %x, half %y) #0 {
 entry:
 ; CHECK-LABEL: test_concat_scalars_fp_2x_v2i8_to_v8i8:
-; CHECK-NEXT: ins.h v0[0], v1[0]
-; CHECK-NEXT: ins.h v0[1], v2[0]
-; CHECK-NEXT: ins.h v0[2], v1[0]
-; CHECK-NEXT: ins.h v0[3], v2[0]
+; CHECK-NEXT: mov.h v0[0], v1[0]
+; CHECK-NEXT: mov.h v0[1], v2[0]
+; CHECK-NEXT: mov.h v0[2], v1[0]
+; CHECK-NEXT: mov.h v0[3], v2[0]
 ; CHECK-NEXT: ret
   %0 = bitcast half %x to <2 x i8>
   %y0 = bitcast half %y to <2 x i8>

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-v16-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-v16-instructions.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-v16-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-v16-instructions.ll Wed Feb  8 05:28:08 2017
@@ -11,8 +11,8 @@ define <16 x half> @sitofp_i32(<16 x i32
 ; CHECK-DAG: fcvtn v1.4h, [[S2]]
 ; CHECK-DAG: v[[R1:[0-9]+]].4h, [[S1]]
 ; CHECK-DAG: v[[R3:[0-9]+]].4h, [[S3]]
-; CHECK-DAg: ins v0.d[1], v[[R1]].d[0]
-; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+; CHECK-DAG: mov v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: mov v1.d[1], v[[R3]].d[0]
 
   %1 = sitofp <16 x i32> %a to <16 x half>
   ret <16 x half> %1
@@ -44,8 +44,8 @@ define <16 x half> @sitofp_i64(<16 x i64
 ; CHECK-DAG: fcvtn v1.4h, [[S2]].4s
 ; CHECK-DAG: fcvtn v[[R1:[0-9]+]].4h, [[S1]].4s
 ; CHECK-DAG: fcvtn v[[R3:[0-9]+]].4h, [[S3]].4s
-; CHECK-DAG: ins v0.d[1], v[[R1]].d[0]
-; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+; CHECK-DAG: mov v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: mov v1.d[1], v[[R3]].d[0]
 
   %1 = sitofp <16 x i64> %a to <16 x half>
   ret <16 x half> %1
@@ -62,8 +62,8 @@ define <16 x half> @uitofp_i32(<16 x i32
 ; CHECK-DAG: fcvtn v1.4h, [[S2]]
 ; CHECK-DAG: v[[R1:[0-9]+]].4h, [[S1]]
 ; CHECK-DAG: v[[R3:[0-9]+]].4h, [[S3]]
-; CHECK-DAg: ins v0.d[1], v[[R1]].d[0]
-; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+; CHECK-DAG: mov v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: mov v1.d[1], v[[R3]].d[0]
 
   %1 = uitofp <16 x i32> %a to <16 x half>
   ret <16 x half> %1
@@ -95,8 +95,8 @@ define <16 x half> @uitofp_i64(<16 x i64
 ; CHECK-DAG: fcvtn v1.4h, [[S2]].4s
 ; CHECK-DAG: fcvtn v[[R1:[0-9]+]].4h, [[S1]].4s
 ; CHECK-DAG: fcvtn v[[R3:[0-9]+]].4h, [[S3]].4s
-; CHECK-DAG: ins v0.d[1], v[[R1]].d[0]
-; CHECK-DAG: ins v1.d[1], v[[R3]].d[0]
+; CHECK-DAG: mov v0.d[1], v[[R1]].d[0]
+; CHECK-DAG: mov v1.d[1], v[[R3]].d[0]
 
   %1 = uitofp <16 x i64> %a to <16 x half>
   ret <16 x half> %1

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll Wed Feb  8 05:28:08 2017
@@ -87,10 +87,10 @@ define <4 x half> @d_to_h(<4 x double> %
 ; CHECK-DAG: fcvt
 ; CHECK-DAG: fcvt
 ; CHECK-DAG: fcvt
-; CHECK-DAG: ins
-; CHECK-DAG: ins
-; CHECK-DAG: ins
-; CHECK-DAG: ins
+; CHECK-DAG: mov
+; CHECK-DAG: mov
+; CHECK-DAG: mov
+; CHECK-DAG: mov
   %1 = fptrunc <4 x double> %a to <4 x half>
   ret <4 x half> %1
 }
@@ -108,10 +108,10 @@ define <4 x double> @h_to_d(<4 x half> %
 ; CHECK-DAG: fcvt
 ; CHECK-DAG: fcvt
 ; CHECK-DAG: fcvt
-; CHECK-DAG: ins
-; CHECK-DAG: ins
-; CHECK-DAG: ins
-; CHECK-DAG: ins
+; CHECK-DAG: mov
+; CHECK-DAG: mov
+; CHECK-DAG: mov
+; CHECK-DAG: mov
   %1 = fpext <4 x half> %a to <4 x double>
   ret <4 x double> %1
 }

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll Wed Feb  8 05:28:08 2017
@@ -181,7 +181,7 @@ define <8 x half> @s_to_h(<8 x float> %a
 ; CHECK-LABEL: s_to_h:
 ; CHECK-DAG: fcvtn v0.4h, v0.4s
 ; CHECK-DAG: fcvtn [[REG:v[0-9+]]].4h, v1.4s
-; CHECK: ins v0.d[1], [[REG]].d[0]
+; CHECK: mov v0.d[1], [[REG]].d[0]
   %1 = fptrunc <8 x float> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -200,14 +200,14 @@ define <8 x half> @d_to_h(<8 x double> %
 ; CHECK-DAG: fcvt h
 ; CHECK-DAG: fcvt h
 ; CHECK-DAG: fcvt h
-; CHECK-DAG: ins v{{[0-9]+}}.h
-; CHECK-DAG: ins v{{[0-9]+}}.h
-; CHECK-DAG: ins v{{[0-9]+}}.h
-; CHECK-DAG: ins v{{[0-9]+}}.h
-; CHECK-DAG: ins v{{[0-9]+}}.h
-; CHECK-DAG: ins v{{[0-9]+}}.h
-; CHECK-DAG: ins v{{[0-9]+}}.h
-; CHECK-DAG: ins v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
+; CHECK-DAG: mov v{{[0-9]+}}.h
   %1 = fptrunc <8 x double> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -230,10 +230,10 @@ define <8 x double> @h_to_d(<8 x half> %
 ; CHECK-DAG: fcvt d
 ; CHECK-DAG: fcvt d
 ; CHECK-DAG: fcvt d
-; CHECK-DAG: ins
-; CHECK-DAG: ins
-; CHECK-DAG: ins
-; CHECK-DAG: ins
+; CHECK-DAG: mov
+; CHECK-DAG: mov
+; CHECK-DAG: mov
+; CHECK-DAG: mov
   %1 = fpext <8 x half> %a to <8 x double>
   ret <8 x double> %1
 }
@@ -263,7 +263,7 @@ define <8 x half> @sitofp_i8(<8 x i8> %a
 ; CHECK-DAG: scvtf [[LOF:v[0-9]+\.4s]], [[LO]]
 ; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]]
 ; CHECK-DAG: fcvtn v0.4h, [[HIF]]
-; CHECK: ins v0.d[1], v[[LOREG]].d[0]
+; CHECK: mov v0.d[1], v[[LOREG]].d[0]
   %1 = sitofp <8 x i8> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -277,7 +277,7 @@ define <8 x half> @sitofp_i16(<8 x i16>
 ; CHECK-DAG: scvtf [[LOF:v[0-9]+\.4s]], [[LO]]
 ; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]]
 ; CHECK-DAG: fcvtn v0.4h, [[HIF]]
-; CHECK: ins v0.d[1], v[[LOREG]].d[0]
+; CHECK: mov v0.d[1], v[[LOREG]].d[0]
   %1 = sitofp <8 x i16> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -289,7 +289,7 @@ define <8 x half> @sitofp_i32(<8 x i32>
 ; CHECK-DAG: scvtf [[OP2:v[0-9]+\.4s]], v1.4s
 ; CHECK-DAG: fcvtn v[[REG:[0-9]+]].4h, [[OP2]]
 ; CHECK-DAG: fcvtn v0.4h, [[OP1]]
-; CHECK: ins v0.d[1], v[[REG]].d[0]
+; CHECK: mov v0.d[1], v[[REG]].d[0]
   %1 = sitofp <8 x i32> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -315,7 +315,7 @@ define <8 x half> @uitofp_i8(<8 x i8> %a
 ; CHECK-DAG: ucvtf [[LOF:v[0-9]+\.4s]], [[LO]]
 ; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]]
 ; CHECK-DAG: fcvtn v0.4h, [[HIF]]
-; CHECK: ins v0.d[1], v[[LOREG]].d[0]
+; CHECK: mov v0.d[1], v[[LOREG]].d[0]
   %1 = uitofp <8 x i8> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -329,7 +329,7 @@ define <8 x half> @uitofp_i16(<8 x i16>
 ; CHECK-DAG: ucvtf [[LOF:v[0-9]+\.4s]], [[LO]]
 ; CHECK-DAG: fcvtn v[[LOREG:[0-9]+]].4h, [[LOF]]
 ; CHECK-DAG: fcvtn v0.4h, [[HIF]]
-; CHECK: ins v0.d[1], v[[LOREG]].d[0]
+; CHECK: mov v0.d[1], v[[LOREG]].d[0]
   %1 = uitofp <8 x i16> %a to <8 x half>
   ret <8 x half> %1
 }
@@ -341,7 +341,7 @@ define <8 x half> @uitofp_i32(<8 x i32>
 ; CHECK-DAG: ucvtf [[OP2:v[0-9]+\.4s]], v1.4s
 ; CHECK-DAG: fcvtn v[[REG:[0-9]+]].4h, [[OP2]]
 ; CHECK-DAG: fcvtn v0.4h, [[OP1]]
-; CHECK: ins v0.d[1], v[[REG]].d[0]
+; CHECK: mov v0.d[1], v[[REG]].d[0]
   %1 = uitofp <8 x i32> %a to <8 x half>
   ret <8 x half> %1
 }

Modified: llvm/trunk/test/CodeGen/AArch64/fp16-vector-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fp16-vector-shuffle.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fp16-vector-shuffle.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/fp16-vector-shuffle.ll Wed Feb  8 05:28:08 2017
@@ -35,7 +35,7 @@ entry:
 ; }
 define <4 x half> @lane_64_64(<4 x half> %a, <4 x half> %b) #0 {
 ; CHECK-LABEL: lane_64_64:
-; CHECK: ins
+; CHECK: mov
 entry:
   %0 = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
   ret <4 x half> %0
@@ -46,7 +46,7 @@ entry:
 ; }
 define <8 x half> @lane_128_64(<8 x half> %a, <4 x half> %b) #0 {
 ; CHECK-LABEL: lane_128_64:
-; CHECK: ins
+; CHECK: mov
 entry:
   %0 = bitcast <4 x half> %b to <4 x i16>
   %vget_lane = extractelement <4 x i16> %0, i32 2
@@ -61,7 +61,7 @@ entry:
 ; }
 define <4 x half> @lane_64_128(<4 x half> %a, <8 x half> %b) #0 {
 ; CHECK-LABEL: lane_64_128:
-; CHECK: ins
+; CHECK: mov
 entry:
   %0 = bitcast <8 x half> %b to <8 x i16>
   %vgetq_lane = extractelement <8 x i16> %0, i32 5
@@ -76,7 +76,7 @@ entry:
 ; }
 define <8 x half> @lane_128_128(<8 x half> %a, <8 x half> %b) #0 {
 ; CHECK-LABEL: lane_128_128:
-; CHECK: ins
+; CHECK: mov
 entry:
   %0 = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 4, i32 5, i32 6, i32 7>
   ret <8 x half> %0
@@ -225,7 +225,7 @@ entry:
 define <8 x half> @vcombine(<4 x half> %a, <4 x half> %b) #0 {
 entry:
 ; CHECK-LABEL: vcombine:
-; CHECK: ins
+; CHECK: mov
   %shuffle.i = shufflevector <4 x half> %a, <4 x half> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x half> %shuffle.i
 }
@@ -253,7 +253,7 @@ entry:
 define <4 x half> @set_lane_64(<4 x half> %a, half %b) #0 {
 ; CHECK-LABEL: set_lane_64:
 ; CHECK: fmov
-; CHECK: ins
+; CHECK: mov
 entry:
   %0 = bitcast half %b to i16
   %1 = bitcast <4 x half> %a to <4 x i16>
@@ -267,7 +267,7 @@ entry:
 define <8 x half> @set_lane_128(<8 x half> %a, half %b) #0 {
 ; CHECK-LABEL: set_lane_128:
 ; CHECK: fmov
-; CHECK: ins
+; CHECK: mov
 entry:
   %0 = bitcast half %b to i16
   %1 = bitcast <8 x half> %a to <8 x i16>

Modified: llvm/trunk/test/CodeGen/AArch64/vector-fcopysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/vector-fcopysign.ll?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/vector-fcopysign.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/vector-fcopysign.ll Wed Feb  8 05:28:08 2017
@@ -106,10 +106,10 @@ define <4 x float> @test_copysign_v4f32_
 ; CHECK-NEXT:    bit.16b v3, v1, v4
 ; CHECK-NEXT:    mov d1, v2[1]
 ; CHECK-NEXT:    fcvt s1, d1
-; CHECK-NEXT:    ins.s v0[1], v3[0]
-; CHECK-NEXT:    ins.s v0[2], v6[0]
+; CHECK-NEXT:    mov.s v0[1], v3[0]
+; CHECK-NEXT:    mov.s v0[2], v6[0]
 ; CHECK-NEXT:    bit.16b v7, v1, v4
-; CHECK-NEXT:    ins.s v0[3], v7[0]
+; CHECK-NEXT:    mov.s v0[3], v7[0]
 ; CHECK-NEXT:    ret
   %tmp0 = fptrunc <4 x double> %b to <4 x float>
   %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0)

Modified: llvm/trunk/test/MC/AArch64/arm64-advsimd.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/AArch64/arm64-advsimd.s?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/MC/AArch64/arm64-advsimd.s (original)
+++ llvm/trunk/test/MC/AArch64/arm64-advsimd.s Wed Feb  8 05:28:08 2017
@@ -220,15 +220,15 @@ foo:
   ins   v2.h[1], w5
   ins   v2.b[1], w5
 
-; CHECK: ins.d v2[1], x5             ; encoding: [0xa2,0x1c,0x18,0x4e]
-; CHECK: ins.s v2[1], w5             ; encoding: [0xa2,0x1c,0x0c,0x4e]
-; CHECK: ins.h v2[1], w5             ; encoding: [0xa2,0x1c,0x06,0x4e]
-; CHECK: ins.b v2[1], w5             ; encoding: [0xa2,0x1c,0x03,0x4e]
-
-; CHECK: ins.d v2[1], x5             ; encoding: [0xa2,0x1c,0x18,0x4e]
-; CHECK: ins.s v2[1], w5             ; encoding: [0xa2,0x1c,0x0c,0x4e]
-; CHECK: ins.h v2[1], w5             ; encoding: [0xa2,0x1c,0x06,0x4e]
-; CHECK: ins.b v2[1], w5             ; encoding: [0xa2,0x1c,0x03,0x4e]
+; CHECK: mov.d v2[1], x5             ; encoding: [0xa2,0x1c,0x18,0x4e]
+; CHECK: mov.s v2[1], w5             ; encoding: [0xa2,0x1c,0x0c,0x4e]
+; CHECK: mov.h v2[1], w5             ; encoding: [0xa2,0x1c,0x06,0x4e]
+; CHECK: mov.b v2[1], w5             ; encoding: [0xa2,0x1c,0x03,0x4e]
+
+; CHECK: mov.d v2[1], x5             ; encoding: [0xa2,0x1c,0x18,0x4e]
+; CHECK: mov.s v2[1], w5             ; encoding: [0xa2,0x1c,0x0c,0x4e]
+; CHECK: mov.h v2[1], w5             ; encoding: [0xa2,0x1c,0x06,0x4e]
+; CHECK: mov.b v2[1], w5             ; encoding: [0xa2,0x1c,0x03,0x4e]
 
   ins.d v2[1], v15[1]
   ins.s v2[1], v15[1]
@@ -240,15 +240,15 @@ foo:
   ins   v2.h[7], v15.h[3]
   ins   v2.b[10], v15.b[5]
 
-; CHECK: ins.d v2[1], v15[1]         ; encoding: [0xe2,0x45,0x18,0x6e]
-; CHECK: ins.s v2[1], v15[1]         ; encoding: [0xe2,0x25,0x0c,0x6e]
-; CHECK: ins.h v2[1], v15[1]         ; encoding: [0xe2,0x15,0x06,0x6e]
-; CHECK: ins.b v2[1], v15[1]         ; encoding: [0xe2,0x0d,0x03,0x6e]
-
-; CHECK: ins.d v2[1], v15[0]         ; encoding: [0xe2,0x05,0x18,0x6e]
-; CHECK: ins.s v2[3], v15[2]         ; encoding: [0xe2,0x45,0x1c,0x6e]
-; CHECK: ins.h v2[7], v15[3]         ; encoding: [0xe2,0x35,0x1e,0x6e]
-; CHECK: ins.b v2[10], v15[5]        ; encoding: [0xe2,0x2d,0x15,0x6e]
+; CHECK: mov.d v2[1], v15[1]         ; encoding: [0xe2,0x45,0x18,0x6e]
+; CHECK: mov.s v2[1], v15[1]         ; encoding: [0xe2,0x25,0x0c,0x6e]
+; CHECK: mov.h v2[1], v15[1]         ; encoding: [0xe2,0x15,0x06,0x6e]
+; CHECK: mov.b v2[1], v15[1]         ; encoding: [0xe2,0x0d,0x03,0x6e]
+
+; CHECK: mov.d v2[1], v15[0]         ; encoding: [0xe2,0x05,0x18,0x6e]
+; CHECK: mov.s v2[3], v15[2]         ; encoding: [0xe2,0x45,0x1c,0x6e]
+; CHECK: mov.h v2[7], v15[3]         ; encoding: [0xe2,0x35,0x1e,0x6e]
+; CHECK: mov.b v2[10], v15[5]        ; encoding: [0xe2,0x2d,0x15,0x6e]
 
 ; MOV aliases for the above INS instructions.
   mov.d v2[1], x5
@@ -271,22 +271,22 @@ foo:
   mov   v8.h[7], v17.h[3]
   mov   v9.b[10], v18.b[5]
 
-; CHECK: ins.d	v2[1], x5               ; encoding: [0xa2,0x1c,0x18,0x4e]
-; CHECK: ins.s	v3[1], w6               ; encoding: [0xc3,0x1c,0x0c,0x4e]
-; CHECK: ins.h	v4[1], w7               ; encoding: [0xe4,0x1c,0x06,0x4e]
-; CHECK: ins.b	v5[1], w8               ; encoding: [0x05,0x1d,0x03,0x4e]
-; CHECK: ins.d	v9[1], x2               ; encoding: [0x49,0x1c,0x18,0x4e]
-; CHECK: ins.s	v8[1], w3               ; encoding: [0x68,0x1c,0x0c,0x4e]
-; CHECK: ins.h	v7[1], w4               ; encoding: [0x87,0x1c,0x06,0x4e]
-; CHECK: ins.b	v6[1], w5               ; encoding: [0xa6,0x1c,0x03,0x4e]
-; CHECK: ins.d	v1[1], v10[1]           ; encoding: [0x41,0x45,0x18,0x6e]
-; CHECK: ins.s	v2[1], v11[1]           ; encoding: [0x62,0x25,0x0c,0x6e]
-; CHECK: ins.h	v7[1], v12[1]           ; encoding: [0x87,0x15,0x06,0x6e]
-; CHECK: ins.b	v8[1], v15[1]           ; encoding: [0xe8,0x0d,0x03,0x6e]
-; CHECK: ins.d	v2[1], v15[0]           ; encoding: [0xe2,0x05,0x18,0x6e]
-; CHECK: ins.s	v7[3], v16[2]           ; encoding: [0x07,0x46,0x1c,0x6e]
-; CHECK: ins.h	v8[7], v17[3]           ; encoding: [0x28,0x36,0x1e,0x6e]
-; CHECK: ins.b	v9[10], v18[5]          ; encoding: [0x49,0x2e,0x15,0x6e]
+; CHECK: mov.d	v2[1], x5               ; encoding: [0xa2,0x1c,0x18,0x4e]
+; CHECK: mov.s	v3[1], w6               ; encoding: [0xc3,0x1c,0x0c,0x4e]
+; CHECK: mov.h	v4[1], w7               ; encoding: [0xe4,0x1c,0x06,0x4e]
+; CHECK: mov.b	v5[1], w8               ; encoding: [0x05,0x1d,0x03,0x4e]
+; CHECK: mov.d	v9[1], x2               ; encoding: [0x49,0x1c,0x18,0x4e]
+; CHECK: mov.s	v8[1], w3               ; encoding: [0x68,0x1c,0x0c,0x4e]
+; CHECK: mov.h	v7[1], w4               ; encoding: [0x87,0x1c,0x06,0x4e]
+; CHECK: mov.b	v6[1], w5               ; encoding: [0xa6,0x1c,0x03,0x4e]
+; CHECK: mov.d	v1[1], v10[1]           ; encoding: [0x41,0x45,0x18,0x6e]
+; CHECK: mov.s	v2[1], v11[1]           ; encoding: [0x62,0x25,0x0c,0x6e]
+; CHECK: mov.h	v7[1], v12[1]           ; encoding: [0x87,0x15,0x06,0x6e]
+; CHECK: mov.b	v8[1], v15[1]           ; encoding: [0xe8,0x0d,0x03,0x6e]
+; CHECK: mov.d	v2[1], v15[0]           ; encoding: [0xe2,0x05,0x18,0x6e]
+; CHECK: mov.s	v7[3], v16[2]           ; encoding: [0x07,0x46,0x1c,0x6e]
+; CHECK: mov.h	v8[7], v17[3]           ; encoding: [0x28,0x36,0x1e,0x6e]
+; CHECK: mov.b	v9[10], v18[5]          ; encoding: [0x49,0x2e,0x15,0x6e]
 
 
   and.8b  v0, v0, v0

Modified: llvm/trunk/test/MC/Disassembler/AArch64/arm64-advsimd.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/Disassembler/AArch64/arm64-advsimd.txt?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/test/MC/Disassembler/AArch64/arm64-advsimd.txt (original)
+++ llvm/trunk/test/MC/Disassembler/AArch64/arm64-advsimd.txt Wed Feb  8 05:28:08 2017
@@ -139,15 +139,15 @@
 0xa2 0x1c 0x06 0x4e
 0xa2 0x1c 0x03 0x4e
 
-# CHECK: ins.d v2[1], x5
-# CHECK: ins.s v2[1], w5
-# CHECK: ins.h v2[1], w5
-# CHECK: ins.b v2[1], w5
-
-# CHECK: ins.d v2[1], x5
-# CHECK: ins.s v2[1], w5
-# CHECK: ins.h v2[1], w5
-# CHECK: ins.b v2[1], w5
+# CHECK: mov.d v2[1], x5
+# CHECK: mov.s v2[1], w5
+# CHECK: mov.h v2[1], w5
+# CHECK: mov.b v2[1], w5
+
+# CHECK: mov.d v2[1], x5
+# CHECK: mov.s v2[1], w5
+# CHECK: mov.h v2[1], w5
+# CHECK: mov.b v2[1], w5
 
 0xe2 0x45 0x18 0x6e
 0xe2 0x25 0x0c 0x6e
@@ -159,15 +159,15 @@
 0xe2 0x35 0x1e 0x6e
 0xe2 0x2d 0x15 0x6e
 
-# CHECK: ins.d v2[1], v15[1]
-# CHECK: ins.s v2[1], v15[1]
-# CHECK: ins.h v2[1], v15[1]
-# CHECK: ins.b v2[1], v15[1]
-
-# CHECK: ins.d v2[1], v15[0]
-# CHECK: ins.s v2[3], v15[2]
-# CHECK: ins.h v2[7], v15[3]
-# CHECK: ins.b v2[10], v15[5]
+# CHECK: mov.d v2[1], v15[1]
+# CHECK: mov.s v2[1], v15[1]
+# CHECK: mov.h v2[1], v15[1]
+# CHECK: mov.b v2[1], v15[1]
+
+# CHECK: mov.d v2[1], v15[0]
+# CHECK: mov.s v2[3], v15[2]
+# CHECK: mov.h v2[7], v15[3]
+# CHECK: mov.b v2[10], v15[5]
 
 # INS/DUP (non-standard)
 0x60 0x0c 0x08 0x4e
@@ -196,15 +196,15 @@
 0xe2 0x35 0x1e 0x6e
 0xe2 0x2d 0x15 0x6e
 
-# CHECK: ins.d v2[1], v15[1]
-# CHECK: ins.s v2[1], v15[1]
-# CHECK: ins.h v2[1], v15[1]
-# CHECK: ins.b v2[1], v15[1]
-
-# CHECK: ins.d v2[1], v15[0]
-# CHECK: ins.s v2[3], v15[2]
-# CHECK: ins.h v2[7], v15[3]
-# CHECK: ins.b v2[10], v15[5]
+# CHECK: mov.d v2[1], v15[1]
+# CHECK: mov.s v2[1], v15[1]
+# CHECK: mov.h v2[1], v15[1]
+# CHECK: mov.b v2[1], v15[1]
+
+# CHECK: mov.d v2[1], v15[0]
+# CHECK: mov.s v2[3], v15[2]
+# CHECK: mov.h v2[7], v15[3]
+# CHECK: mov.b v2[10], v15[5]
 
 0x00 0x1c 0x20 0x0e
 0x00 0x1c 0x20 0x4e

Modified: llvm/trunk/utils/TableGen/AsmWriterEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/AsmWriterEmitter.cpp?rev=294437&r1=294436&r2=294437&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/AsmWriterEmitter.cpp (original)
+++ llvm/trunk/utils/TableGen/AsmWriterEmitter.cpp Wed Feb  8 05:28:08 2017
@@ -820,8 +820,8 @@ void AsmWriterEmitter::EmitPrintAliasIns
       }
 
       unsigned NumMIOps = 0;
-      for (auto &Operand : CGA.ResultOperands)
-        NumMIOps += Operand.getMINumOperands();
+      for (auto &Operand : CGA.ResultInst->Operands.OperandList)
+        NumMIOps += Operand.MINumOperands;
 
       std::string Cond;
       Cond = std::string("MI->getNumOperands() == ") + utostr(NumMIOps);
@@ -831,6 +831,11 @@ void AsmWriterEmitter::EmitPrintAliasIns
 
       unsigned MIOpNum = 0;
       for (unsigned i = 0, e = LastOpNo; i != e; ++i) {
+        // Skip over tied operands as they're not part of an alias declaration.
+        if (CGA.ResultInst->Operands[MIOpNum].MINumOperands == 1 &&
+            CGA.ResultInst->Operands[MIOpNum].getTiedRegister() != -1)
+          ++MIOpNum;
+
         std::string Op = "MI->getOperand(" + utostr(MIOpNum) + ")";
 
         const CodeGenInstAlias::ResultOperand &RO = CGA.ResultOperands[i];

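For readers skimming the AsmWriterEmitter.cpp hunks above, here is a minimal standalone sketch of the idea. It deliberately does not use the real TableGen classes (CodeGenInstAlias, CGIOperandList); the structs, the field names, and the MOVK-like operand layout are simplified assumptions for illustration only. The two points it models are the ones the patch changes: the operand-count condition is now derived from the result instruction's full operand list rather than from the alias's result operands, and tied operands (which an alias declaration never spells out) are stepped over when mapping alias operands to MachineInstr operand indices.

// tied_operand_skip_sketch.cpp -- illustrative only; the real logic lives in
// AsmWriterEmitter::EmitPrintAliasInstruction and operates on TableGen records.
#include <cstdio>
#include <vector>

// Simplified operand description: how many MI operands it expands to and,
// if it is tied to an earlier operand, which one (-1 means "not tied").
struct OperandInfo {
  unsigned MINumOperands;
  int TiedTo;
};

// Simplified "result instruction" record.
struct InstInfo {
  std::vector<OperandInfo> Operands;
};

// Count the MachineInstr operands of the *result instruction*.  Counting the
// alias's result operands instead would miss tied inputs and make the
// generated "MI->getNumOperands() == N" check reject valid instructions.
unsigned countMIOperands(const InstInfo &Inst) {
  unsigned NumMIOps = 0;
  for (const OperandInfo &Op : Inst.Operands)
    NumMIOps += Op.MINumOperands;
  return NumMIOps;
}

// Map each alias operand to an MI operand index, skipping tied operands,
// which never appear in the alias declaration itself.
std::vector<unsigned> aliasOperandIndices(const InstInfo &Inst,
                                          unsigned NumAliasOps) {
  std::vector<unsigned> Indices;
  unsigned MIOpNum = 0;
  for (unsigned i = 0; i != NumAliasOps; ++i) {
    if (Inst.Operands[MIOpNum].MINumOperands == 1 &&
        Inst.Operands[MIOpNum].TiedTo != -1)
      ++MIOpNum; // step over the tied copy of an earlier operand
    Indices.push_back(MIOpNum);
    MIOpNum += Inst.Operands[MIOpNum].MINumOperands;
  }
  return Indices;
}

int main() {
  // Hypothetical MOVK-like layout: dst, src (tied to dst), imm, shift.
  // The alias spells out only three of these four MI operands.
  InstInfo MovKLike{{{1, -1}, {1, 0}, {1, -1}, {1, -1}}};
  std::printf("MI operand count: %u\n", countMIOperands(MovKLike)); // 4
  for (unsigned Idx : aliasOperandIndices(MovKLike, 3))
    std::printf("alias operand -> MI operand %u\n", Idx);           // 0, 2, 3
  return 0;
}

With this mapping in place, an alias for an instruction whose destination is tied to a source can still be matched and printed, which is what lets the element-insert form of 'ins' disassemble as the architecturally preferred 'mov' spelling seen throughout the test updates above.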



More information about the llvm-commits mailing list