[llvm] e8d60e7 - [ARM] Regenerate ARM neon-copy.ll test. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 9 00:24:42 PDT 2021


Author: David Green
Date: 2021-08-09T08:24:28+01:00
New Revision: e8d60e75fc70e68d79c91423de272159f947c2fd

URL: https://github.com/llvm/llvm-project/commit/e8d60e75fc70e68d79c91423de272159f947c2fd
DIFF: https://github.com/llvm/llvm-project/commit/e8d60e75fc70e68d79c91423de272159f947c2fd.diff

LOG: [ARM] Regenerate ARM neon-copy.ll test. NFC

This test didn't include all test check lines, thanks to .'s in function
names. This commit also changes the triple to hard float to make a more
interesting test for NEON code generation.

Added: 
    

Modified: 
    llvm/test/CodeGen/ARM/neon-copy.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/ARM/neon-copy.ll b/llvm/test/CodeGen/ARM/neon-copy.ll
index e222939328c84..09a991da2e59a 100644
--- a/llvm/test/CodeGen/ARM/neon-copy.ll
+++ b/llvm/test/CodeGen/ARM/neon-copy.ll
@@ -1,15 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -verify-machineinstrs -mtriple=armv7a-linux-gn | FileCheck %s
+; RUN: llc < %s -mtriple=armv7a-none-eabihf -mattr=+neon -verify-machineinstrs | FileCheck %s
 
 define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
 ; CHECK-LABEL: ins16bw:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vld1.8 {d17[7]}, [r0]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.8 d1[7], r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 15
   ret <16 x i8> %tmp3
@@ -18,12 +13,7 @@ define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
 define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
 ; CHECK-LABEL: ins8hw:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vld1.16 {d17[2]}, [r0:16]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.16 d1[2], r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 6
   ret <8 x i16> %tmp3
@@ -32,12 +22,7 @@ define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
 define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
 ; CHECK-LABEL: ins4sw:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vld1.32 {d17[0]}, [r0:32]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.32 d1[0], r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 2
   ret <4 x i32> %tmp3
@@ -46,7 +31,8 @@ define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
 define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
 ; CHECK-LABEL: ins2dw:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    ldm sp, {r2, r3}
+; CHECK-NEXT:    vmov.32 d1[0], r0
+; CHECK-NEXT:    vmov.32 d1[1], r1
 ; CHECK-NEXT:    bx lr
   %tmp3 = insertelement <2 x i64> %tmp1, i64 %tmp2, i32 1
   ret <2 x i64> %tmp3
@@ -55,9 +41,7 @@ define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
 define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
 ; CHECK-LABEL: ins8bw:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.8 d16[5], r2
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.8 d0[5], r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 5
   ret <8 x i8> %tmp3
@@ -66,9 +50,7 @@ define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
 define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
 ; CHECK-LABEL: ins4hw:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.16 d16[3], r2
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.16 d0[3], r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 3
   ret <4 x i16> %tmp3
@@ -77,7 +59,7 @@ define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
 define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
 ; CHECK-LABEL: ins2sw:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r1, r2
+; CHECK-NEXT:    vmov.32 d0[1], r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
   ret <2 x i32> %tmp3
@@ -86,13 +68,9 @@ define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
 define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
 ; CHECK-LABEL: ins16b16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r1, sp
-; CHECK-NEXT:    vmov.u8 r0, d16[2]
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
-; CHECK-NEXT:    vmov.8 d17[7], r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.u8 r0, d0[2]
+; CHECK-NEXT:    vmov.8 d3[7], r0
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <16 x i8> %tmp1, i32 2
   %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
@@ -102,13 +80,9 @@ define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
 define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
 ; CHECK-LABEL: ins8h8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r1, sp
-; CHECK-NEXT:    vmov.u16 r0, d16[2]
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
-; CHECK-NEXT:    vmov.16 d17[3], r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.u16 r0, d0[2]
+; CHECK-NEXT:    vmov.16 d3[3], r0
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
@@ -118,13 +92,9 @@ define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
 define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
 ; CHECK-LABEL: ins4s4:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    mov r1, sp
-; CHECK-NEXT:    vmov.32 r0, d17[0]
-; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
-; CHECK-NEXT:    vmov.32 d16[1], r0
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.32 r0, d1[0]
+; CHECK-NEXT:    vmov.32 d2[1], r0
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
@@ -134,9 +104,10 @@ define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
 define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
 ; CHECK-LABEL: ins2d2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r3, r1
-; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    ldm sp, {r0, r1}
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov.32 d3[0], r0
+; CHECK-NEXT:    vmov.32 d3[1], r1
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x i64> %tmp1, i32 0
   %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
@@ -146,12 +117,8 @@ define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
 define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
 ; CHECK-LABEL: ins4f4:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d3, r2, r3
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vld1.64 {d0, d1}, [r0]
-; CHECK-NEXT:    vmov.f32 s1, s6
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov.f32 s5, s2
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x float> %tmp1, i32 2
   %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
@@ -161,9 +128,8 @@ define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
 define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
 ; CHECK-LABEL: ins2df2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r3, r1
-; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    ldm sp, {r0, r1}
+; CHECK-NEXT:    vorr d3, d0, d0
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x double> %tmp1, i32 0
   %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
@@ -173,13 +139,9 @@ define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
 define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
 ; CHECK-LABEL: ins8b16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u8 r0, d16[2]
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vmov.8 d17[7], r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.u8 r0, d0[2]
+; CHECK-NEXT:    vmov.8 d3[7], r0
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i8> %tmp1, i32 2
   %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
@@ -189,13 +151,9 @@ define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
 define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
 ; CHECK-LABEL: ins4h8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u16 r0, d16[2]
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vmov.16 d17[3], r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.u16 r0, d0[2]
+; CHECK-NEXT:    vmov.16 d3[3], r0
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
@@ -205,13 +163,9 @@ define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
 define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
 ; CHECK-LABEL: ins2s4:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[1]
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vmov.32 d16[1], r0
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.32 r0, d0[1]
+; CHECK-NEXT:    vmov.32 d2[1], r0
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x i32> %tmp1, i32 1
   %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
@@ -221,15 +175,12 @@ define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
 define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
 ; CHECK-LABEL: ins1d2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r12, r3
-; CHECK-NEXT:    mov lr, r2
-; CHECK-NEXT:    mov r1, r12
-; CHECK-NEXT:    vmov.32 r2, d16[0]
-; CHECK-NEXT:    mov r0, lr
-; CHECK-NEXT:    vmov.32 r3, d16[1]
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
+; CHECK-NEXT:    vmov.32 d3[0], r0
+; CHECK-NEXT:    vmov.32 d3[1], r1
+; CHECK-NEXT:    vorr q0, q1, q1
+; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
   ret <2 x i64> %tmp4
@@ -238,12 +189,8 @@ define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
 define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
 ; CHECK-LABEL: ins2f4:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vldr d1, [sp]
-; CHECK-NEXT:    vmov d2, r0, r1
-; CHECK-NEXT:    vmov d0, r2, r3
-; CHECK-NEXT:    vmov.f32 s1, s5
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov.f32 s5, s1
+; CHECK-NEXT:    vorr q0, q1, q1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x float> %tmp1, i32 1
   %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
@@ -253,15 +200,9 @@ define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
 define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
 ; CHECK-LABEL: ins1f2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    mov lr, r3
-; CHECK-NEXT:    mov r12, r1
-; CHECK-NEXT:    mov r3, r0
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r2, r3
-; CHECK-NEXT:    mov r1, lr
-; CHECK-NEXT:    mov r3, r12
-; CHECK-NEXT:    pop {r11, pc}
+; CHECK-NEXT:    vorr d3, d0, d0
+; CHECK-NEXT:    vorr q0, q1, q1
+; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <1 x double> %tmp1, i32 0
   %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
   ret <2 x double> %tmp4
@@ -270,7 +211,7 @@ define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
 define <2 x double> @ins1f2_args_flipped(<2 x double> %tmp2, <1 x double> %tmp1) {
 ; CHECK-LABEL: ins1f2_args_flipped:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    ldm sp, {r2, r3}
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <1 x double> %tmp1, i32 0
   %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
@@ -280,11 +221,9 @@ define <2 x double> @ins1f2_args_flipped(<2 x double> %tmp2, <1 x double> %tmp1)
 define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
 ; CHECK-LABEL: ins16b8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u8 r0, d16[2]
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    vmov.8 d16[7], r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.u8 r0, d0[2]
+; CHECK-NEXT:    vmov.8 d2[7], r0
+; CHECK-NEXT:    vorr d0, d2, d2
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <16 x i8> %tmp1, i32 2
   %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
@@ -294,11 +233,9 @@ define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
 define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
 ; CHECK-LABEL: ins8h4:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u16 r0, d16[2]
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    vmov.16 d16[3], r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.u16 r0, d0[2]
+; CHECK-NEXT:    vmov.16 d2[3], r0
+; CHECK-NEXT:    vorr d0, d2, d2
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
@@ -308,11 +245,9 @@ define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
 define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
 ; CHECK-LABEL: ins4s2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov.32 r0, d17[0]
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    vmov.32 d16[1], r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.32 r0, d1[0]
+; CHECK-NEXT:    vmov.32 d2[1], r0
+; CHECK-NEXT:    vorr d0, d2, d2
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
@@ -322,6 +257,10 @@ define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
 define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
 ; CHECK-LABEL: ins2d1:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov r0, r1, d0
+; CHECK-NEXT:    vmov.32 d2[0], r0
+; CHECK-NEXT:    vmov.32 d2[1], r1
+; CHECK-NEXT:    vorr d0, d2, d2
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x i64> %tmp1, i32 0
   %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
@@ -331,10 +270,8 @@ define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
 define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
 ; CHECK-LABEL: ins4f2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vldr d2, [sp]
 ; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    vmov.f64 d0, d2
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x float> %tmp1, i32 2
   %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
@@ -344,8 +281,7 @@ define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
 define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
 ; CHECK-LABEL: ins2f1:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    vmov.f64 d0, d1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x double> %tmp1, i32 1
   %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
@@ -355,11 +291,9 @@ define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
 define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
 ; CHECK-LABEL: ins8b8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u8 r0, d16[2]
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vmov.8 d16[4], r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.u8 r0, d0[2]
+; CHECK-NEXT:    vmov.8 d1[4], r0
+; CHECK-NEXT:    vorr d0, d1, d1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i8> %tmp1, i32 2
   %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
@@ -369,11 +303,9 @@ define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
 define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
 ; CHECK-LABEL: ins4h4:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u16 r0, d16[2]
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vmov.16 d16[3], r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.u16 r0, d0[2]
+; CHECK-NEXT:    vmov.16 d1[3], r0
+; CHECK-NEXT:    vorr d0, d1, d1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
@@ -383,9 +315,9 @@ define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
 define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
 ; CHECK-LABEL: ins2s2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    vmov.32 r1, d16[0]
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 d1[1], r0
+; CHECK-NEXT:    vorr d0, d1, d1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x i32> %tmp1, i32 0
   %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
@@ -395,9 +327,11 @@ define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
 define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
 ; CHECK-LABEL: ins1d1:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
-; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
+; CHECK-NEXT:    vmov.32 d1[0], r0
+; CHECK-NEXT:    vmov.32 d1[1], r1
+; CHECK-NEXT:    vorr d0, d1, d1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
@@ -407,10 +341,8 @@ define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
 define <2 x float> @ins2f2(<2 x float> %tmp1, <2 x float> %tmp2) {
 ; CHECK-LABEL: ins2f2:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d0, r0, r1
-; CHECK-NEXT:    vmov d1, r2, r3
 ; CHECK-NEXT:    vmov.f32 s3, s0
-; CHECK-NEXT:    vmov r0, r1, d1
+; CHECK-NEXT:    vmov.f64 d0, d1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x float> %tmp1, i32 0
   %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
@@ -429,9 +361,7 @@ define <1 x double> @ins1df1(<1 x double> %tmp1, <1 x double> %tmp2) {
 define i32 @umovw16b(<16 x i8> %tmp1) {
 ; CHECK-LABEL: umovw16b:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u8 r0, d17[0]
+; CHECK-NEXT:    vmov.u8 r0, d1[0]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <16 x i8> %tmp1, i32 8
   %tmp4 = zext i8 %tmp3 to i32
@@ -441,9 +371,7 @@ define i32 @umovw16b(<16 x i8> %tmp1) {
 define i32 @umovw8h(<8 x i16> %tmp1) {
 ; CHECK-LABEL: umovw8h:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    vmov.u16 r0, d0[2]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
   %tmp4 = zext i16 %tmp3 to i32
@@ -453,8 +381,7 @@ define i32 @umovw8h(<8 x i16> %tmp1) {
 define i32 @umovw4s(<4 x i32> %tmp1) {
 ; CHECK-LABEL: umovw4s:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov.32 r0, d17[0]
+; CHECK-NEXT:    vmov.32 r0, d1[0]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   ret i32 %tmp3
@@ -463,8 +390,7 @@ define i32 @umovw4s(<4 x i32> %tmp1) {
 define i64 @umovx2d(<2 x i64> %tmp1) {
 ; CHECK-LABEL: umovx2d:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r1, r3
-; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    vmov r0, r1, d1
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x i64> %tmp1, i32 1
   ret i64 %tmp3
@@ -473,8 +399,7 @@ define i64 @umovx2d(<2 x i64> %tmp1) {
 define i32 @umovw8b(<8 x i8> %tmp1) {
 ; CHECK-LABEL: umovw8b:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u8 r0, d16[7]
+; CHECK-NEXT:    vmov.u8 r0, d0[7]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i8> %tmp1, i32 7
   %tmp4 = zext i8 %tmp3 to i32
@@ -484,8 +409,7 @@ define i32 @umovw8b(<8 x i8> %tmp1) {
 define i32 @umovw4h(<4 x i16> %tmp1) {
 ; CHECK-LABEL: umovw4h:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.u16 r0, d16[2]
+; CHECK-NEXT:    vmov.u16 r0, d0[2]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = zext i16 %tmp3 to i32
@@ -495,8 +419,7 @@ define i32 @umovw4h(<4 x i16> %tmp1) {
 define i32 @umovw2s(<2 x i32> %tmp1) {
 ; CHECK-LABEL: umovw2s:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    vmov.32 r0, d0[1]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x i32> %tmp1, i32 1
   ret i32 %tmp3
@@ -505,9 +428,8 @@ define i32 @umovw2s(<2 x i32> %tmp1) {
 define i64 @umovx1d(<1 x i64> %tmp1) {
 ; CHECK-LABEL: umovx1d:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
-; CHECK-NEXT:    vmov.32 r1, d16[1]
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <1 x i64> %tmp1, i32 0
   ret i64 %tmp3
@@ -516,9 +438,7 @@ define i64 @umovx1d(<1 x i64> %tmp1) {
 define i32 @smovw16b(<16 x i8> %tmp1) {
 ; CHECK-LABEL: smovw16b:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.s8 r0, d17[0]
+; CHECK-NEXT:    vmov.s8 r0, d1[0]
 ; CHECK-NEXT:    add r0, r0, r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <16 x i8> %tmp1, i32 8
@@ -530,9 +450,7 @@ define i32 @smovw16b(<16 x i8> %tmp1) {
 define i32 @smovw8h(<8 x i16> %tmp1) {
 ; CHECK-LABEL: smovw8h:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    vmov.s16 r0, d0[2]
 ; CHECK-NEXT:    add r0, r0, r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
@@ -544,8 +462,7 @@ define i32 @smovw8h(<8 x i16> %tmp1) {
 define i64 @smovx16b(<16 x i8> %tmp1) {
 ; CHECK-LABEL: smovx16b:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov.s8 r0, d17[0]
+; CHECK-NEXT:    vmov.s8 r0, d1[0]
 ; CHECK-NEXT:    asr r1, r0, #31
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <16 x i8> %tmp1, i32 8
@@ -556,8 +473,7 @@ define i64 @smovx16b(<16 x i8> %tmp1) {
 define i64 @smovx8h(<8 x i16> %tmp1) {
 ; CHECK-LABEL: smovx8h:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    vmov.s16 r0, d0[2]
 ; CHECK-NEXT:    asr r1, r0, #31
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i16> %tmp1, i32 2
@@ -568,8 +484,8 @@ define i64 @smovx8h(<8 x i16> %tmp1) {
 define i64 @smovx4s(<4 x i32> %tmp1) {
 ; CHECK-LABEL: smovx4s:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    asr r1, r2, #31
+; CHECK-NEXT:    vmov.32 r0, d1[0]
+; CHECK-NEXT:    asr r1, r0, #31
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i32> %tmp1, i32 2
   %tmp4 = sext i32 %tmp3 to i64
@@ -579,8 +495,7 @@ define i64 @smovx4s(<4 x i32> %tmp1) {
 define i32 @smovw8b(<8 x i8> %tmp1) {
 ; CHECK-LABEL: smovw8b:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.s8 r0, d16[4]
+; CHECK-NEXT:    vmov.s8 r0, d0[4]
 ; CHECK-NEXT:    add r0, r0, r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i8> %tmp1, i32 4
@@ -592,8 +507,7 @@ define i32 @smovw8b(<8 x i8> %tmp1) {
 define i32 @smovw4h(<4 x i16> %tmp1) {
 ; CHECK-LABEL: smovw4h:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    vmov.s16 r0, d0[2]
 ; CHECK-NEXT:    add r0, r0, r0
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
@@ -605,8 +519,7 @@ define i32 @smovw4h(<4 x i16> %tmp1) {
 define i32 @smovx8b(<8 x i8> %tmp1) {
 ; CHECK-LABEL: smovx8b:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.s8 r0, d16[6]
+; CHECK-NEXT:    vmov.s8 r0, d0[6]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <8 x i8> %tmp1, i32 6
   %tmp4 = sext i8 %tmp3 to i32
@@ -616,8 +529,7 @@ define i32 @smovx8b(<8 x i8> %tmp1) {
 define i32 @smovx4h(<4 x i16> %tmp1) {
 ; CHECK-LABEL: smovx4h:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.s16 r0, d16[2]
+; CHECK-NEXT:    vmov.s16 r0, d0[2]
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <4 x i16> %tmp1, i32 2
   %tmp4 = sext i16 %tmp3 to i32
@@ -627,8 +539,7 @@ define i32 @smovx4h(<4 x i16> %tmp1) {
 define i64 @smovx2s(<2 x i32> %tmp1) {
 ; CHECK-LABEL: smovx2s:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    vmov.32 r0, d0[1]
 ; CHECK-NEXT:    asr r1, r0, #31
 ; CHECK-NEXT:    bx lr
   %tmp3 = extractelement <2 x i32> %tmp1, i32 1
@@ -639,11 +550,10 @@ define i64 @smovx2s(<2 x i32> %tmp1) {
 define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
 ; CHECK-LABEL: test_vcopy_lane_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vldr d18, .LCPI50_0
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vtbl.8 d16, {d16, d17}, d18
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d1 killed $d1 killed $q0 def $q0
+; CHECK-NEXT:    vldr d16, .LCPI50_0
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0 def $q0
+; CHECK-NEXT:    vtbl.8 d0, {d0, d1}, d16
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
 ; CHECK-NEXT:  @ %bb.1:
@@ -663,13 +573,11 @@ define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
 define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
 ; CHECK-LABEL: test_vcopyq_laneq_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r2, r3
-; CHECK-NEXT:    vldr d18, .LCPI51_0
-; CHECK-NEXT:    vtbl.8 d17, {d16, d17}, d18
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-NEXT:    vldr d16, .LCPI51_0
+; CHECK-NEXT:    @ kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-NEXT:    vtbl.8 d1, {d1, d2}, d16
+; CHECK-NEXT:    @ kill: def $q0 killed $q0 killed $q0_q1
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
 ; CHECK-NEXT:  @ %bb.1:
@@ -689,11 +597,10 @@ define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
 define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
 ; CHECK-LABEL: test_vcopy_lane_swap_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vldr d18, .LCPI52_0
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vtbl.8 d16, {d16, d17}, d18
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d1 killed $d1 killed $q0 def $q0
+; CHECK-NEXT:    vldr d16, .LCPI52_0
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0 def $q0
+; CHECK-NEXT:    vtbl.8 d0, {d0, d1}, d16
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
 ; CHECK-NEXT:  @ %bb.1:
@@ -713,13 +620,11 @@ define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
 define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
 ; CHECK-LABEL: test_vcopyq_laneq_swap_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
-; CHECK-NEXT:    vmov d1, r2, r3
-; CHECK-NEXT:    vldr d16, .LCPI53_0
-; CHECK-NEXT:    vtbl.8 d2, {d1, d2}, d16
-; CHECK-NEXT:    vmov r2, r3, d3
-; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    vorr q9, q1, q1
+; CHECK-NEXT:    vldr d20, .LCPI53_0
+; CHECK-NEXT:    vorr q8, q0, q0
+; CHECK-NEXT:    vtbl.8 d18, {d17, d18}, d20
+; CHECK-NEXT:    vorr q0, q9, q9
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 3
 ; CHECK-NEXT:  @ %bb.1:
@@ -739,8 +644,7 @@ define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
 define <8 x i8> @test_vdup_n_u8(i8 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.8 d16, r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.8 d0, r0
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <8 x i8> undef, i8 %v1, i32 0
   %vecinit1.i = insertelement <8 x i8> %vecinit.i, i8 %v1, i32 1
@@ -756,8 +660,7 @@ define <8 x i8> @test_vdup_n_u8(i8 %v1) #0 {
 define <4 x i16> @test_vdup_n_u16(i16 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.16 d16, r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.16 d0, r0
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <4 x i16> undef, i16 %v1, i32 0
   %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %v1, i32 1
@@ -769,8 +672,7 @@ define <4 x i16> @test_vdup_n_u16(i16 %v1) #0 {
 define <2 x i32> @test_vdup_n_u32(i32 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.32 d16, r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.32 d0, r0
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <2 x i32> undef, i32 %v1, i32 0
   %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %v1, i32 1
@@ -780,6 +682,8 @@ define <2 x i32> @test_vdup_n_u32(i32 %v1) #0 {
 define <1 x i64> @test_vdup_n_u64(i64 %v1) #0 {
 ; CHECK-LABEL: test_vdup_n_u64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <1 x i64> undef, i64 %v1, i32 0
   ret <1 x i64> %vecinit.i
@@ -788,9 +692,7 @@ define <1 x i64> @test_vdup_n_u64(i64 %v1) #0 {
 define <16 x i8> @test_vdupq_n_u8(i8 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.8 q8, r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vdup.8 q0, r0
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <16 x i8> undef, i8 %v1, i32 0
   %vecinit1.i = insertelement <16 x i8> %vecinit.i, i8 %v1, i32 1
@@ -814,9 +716,7 @@ define <16 x i8> @test_vdupq_n_u8(i8 %v1) #0 {
 define <8 x i16> @test_vdupq_n_u16(i16 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.16 q8, r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vdup.16 q0, r0
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <8 x i16> undef, i16 %v1, i32 0
   %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %v1, i32 1
@@ -832,9 +732,7 @@ define <8 x i16> @test_vdupq_n_u16(i16 %v1) #0 {
 define <4 x i32> @test_vdupq_n_u32(i32 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r1, r0
-; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    vdup.32 q0, r0
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <4 x i32> undef, i32 %v1, i32 0
   %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %v1, i32 1
@@ -846,8 +744,9 @@ define <4 x i32> @test_vdupq_n_u32(i32 %v1) #0 {
 define <2 x i64> @test_vdupq_n_u64(i64 %v1) #0 {
 ; CHECK-LABEL: test_vdupq_n_u64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
+; CHECK-NEXT:    vorr d1, d0, d0
 ; CHECK-NEXT:    bx lr
   %vecinit.i = insertelement <2 x i64> undef, i64 %v1, i32 0
   %vecinit1.i = insertelement <2 x i64> %vecinit.i, i64 %v1, i32 1
@@ -857,9 +756,7 @@ define <2 x i64> @test_vdupq_n_u64(i64 %v1) #0 {
 define <8 x i8> @test_vdup_lane_s8(<8 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdup_lane_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.8 d16, d16[5]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.8 d0, d0[5]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <8 x i8> %shuffle
@@ -868,9 +765,7 @@ define <8 x i8> @test_vdup_lane_s8(<8 x i8> %v1) #0 {
 define <4 x i16> @test_vdup_lane_s16(<4 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdup_lane_s16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.16 d16, d16[2]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.16 d0, d0[2]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
   ret <4 x i16> %shuffle
@@ -879,9 +774,7 @@ define <4 x i16> @test_vdup_lane_s16(<4 x i16> %v1) #0 {
 define <2 x i32> @test_vdup_lane_s32(<2 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdup_lane_s32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.32 d16, d16[1]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.32 d0, d0[1]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
   ret <2 x i32> %shuffle
@@ -890,10 +783,8 @@ define <2 x i32> @test_vdup_lane_s32(<2 x i32> %v1) #0 {
 define <16 x i8> @test_vdupq_lane_s8(<8 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.8 q8, d16[5]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vdup.8 q0, d0[5]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <16 x i8> %shuffle
@@ -902,10 +793,8 @@ define <16 x i8> @test_vdupq_lane_s8(<8 x i8> %v1) #0 {
 define <8 x i16> @test_vdupq_lane_s16(<4 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.16 q8, d16[2]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vdup.16 q0, d0[2]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i16> %shuffle
@@ -914,10 +803,8 @@ define <8 x i16> @test_vdupq_lane_s16(<4 x i16> %v1) #0 {
 define <4 x i32> @test_vdupq_lane_s32(<2 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.32 q8, d16[1]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vdup.32 q0, d0[1]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %shuffle
@@ -926,10 +813,8 @@ define <4 x i32> @test_vdupq_lane_s32(<2 x i32> %v1) #0 {
 define <2 x i64> @test_vdupq_lane_s64(<1 x i64> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_lane_s64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vorr d17, d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vmov.f64 d1, d0
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <1 x i64> %v1, <1 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %shuffle
@@ -938,9 +823,7 @@ define <2 x i64> @test_vdupq_lane_s64(<1 x i64> %v1) #0 {
 define <8 x i8> @test_vdup_laneq_s8(<16 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdup_laneq_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.8 d16, d16[5]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.8 d0, d0[5]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <8 x i8> %shuffle
@@ -949,9 +832,7 @@ define <8 x i8> @test_vdup_laneq_s8(<16 x i8> %v1) #0 {
 define <4 x i16> @test_vdup_laneq_s16(<8 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdup_laneq_s16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.16 d16, d16[2]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.16 d0, d0[2]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
   ret <4 x i16> %shuffle
@@ -960,9 +841,7 @@ define <4 x i16> @test_vdup_laneq_s16(<8 x i16> %v1) #0 {
 define <2 x i32> @test_vdup_laneq_s32(<4 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdup_laneq_s32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.32 d16, d16[1]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.32 d0, d0[1]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
   ret <2 x i32> %shuffle
@@ -971,10 +850,7 @@ define <2 x i32> @test_vdup_laneq_s32(<4 x i32> %v1) #0 {
 define <16 x i8> @test_vdupq_laneq_s8(<16 x i8> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.8 q8, d16[5]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vdup.8 q0, d0[5]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <16 x i8> %shuffle
@@ -983,10 +859,7 @@ define <16 x i8> @test_vdupq_laneq_s8(<16 x i8> %v1) #0 {
 define <8 x i16> @test_vdupq_laneq_s16(<8 x i16> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.16 q8, d16[2]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vdup.16 q0, d0[2]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i16> %shuffle
@@ -995,10 +868,7 @@ define <8 x i16> @test_vdupq_laneq_s16(<8 x i16> %v1) #0 {
 define <4 x i32> @test_vdupq_laneq_s32(<4 x i32> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.32 q8, d16[1]
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vdup.32 q0, d0[1]
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %shuffle
@@ -1007,8 +877,7 @@ define <4 x i32> @test_vdupq_laneq_s32(<4 x i32> %v1) #0 {
 define <2 x i64> @test_vdupq_laneq_s64(<2 x i64> %v1) #0 {
 ; CHECK-LABEL: test_vdupq_laneq_s64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    mov r3, r1
+; CHECK-NEXT:    vmov.f64 d1, d0
 ; CHECK-NEXT:    bx lr
   %shuffle = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
   ret <2 x i64> %shuffle
@@ -1017,6 +886,7 @@ define <2 x i64> @test_vdupq_laneq_s64(<2 x i64> %v1) #0 {
 define i64 @test_bitcastv8i8toi64(<8 x i8> %in) {
 ; CHECK-LABEL: test_bitcastv8i8toi64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    bx lr
    %res = bitcast <8 x i8> %in to i64
    ret i64 %res
@@ -1025,6 +895,7 @@ define i64 @test_bitcastv8i8toi64(<8 x i8> %in) {
 define i64 @test_bitcastv4i16toi64(<4 x i16> %in) {
 ; CHECK-LABEL: test_bitcastv4i16toi64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    bx lr
    %res = bitcast <4 x i16> %in to i64
    ret i64 %res
@@ -1033,6 +904,7 @@ define i64 @test_bitcastv4i16toi64(<4 x i16> %in) {
 define i64 @test_bitcastv2i32toi64(<2 x i32> %in) {
 ; CHECK-LABEL: test_bitcastv2i32toi64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    bx lr
    %res = bitcast <2 x i32> %in to i64
    ret i64 %res
@@ -1041,6 +913,7 @@ define i64 @test_bitcastv2i32toi64(<2 x i32> %in) {
 define i64 @test_bitcastv2f32toi64(<2 x float> %in) {
 ; CHECK-LABEL: test_bitcastv2f32toi64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    bx lr
    %res = bitcast <2 x float> %in to i64
    ret i64 %res
@@ -1049,6 +922,7 @@ define i64 @test_bitcastv2f32toi64(<2 x float> %in) {
 define i64 @test_bitcastv1i64toi64(<1 x i64> %in) {
 ; CHECK-LABEL: test_bitcastv1i64toi64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    bx lr
    %res = bitcast <1 x i64> %in to i64
    ret i64 %res
@@ -1057,6 +931,7 @@ define i64 @test_bitcastv1i64toi64(<1 x i64> %in) {
 define i64 @test_bitcastv1f64toi64(<1 x double> %in) {
 ; CHECK-LABEL: test_bitcastv1f64toi64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov r0, r1, d0
 ; CHECK-NEXT:    bx lr
    %res = bitcast <1 x double> %in to i64
    ret i64 %res
@@ -1065,6 +940,7 @@ define i64 @test_bitcastv1f64toi64(<1 x double> %in) {
 define <8 x i8> @test_bitcasti64tov8i8(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov8i8:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    bx lr
    %res = bitcast i64 %in to <8 x i8>
    ret <8 x i8> %res
@@ -1073,6 +949,7 @@ define <8 x i8> @test_bitcasti64tov8i8(i64 %in) {
 define <4 x i16> @test_bitcasti64tov4i16(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov4i16:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    bx lr
    %res = bitcast i64 %in to <4 x i16>
    ret <4 x i16> %res
@@ -1081,6 +958,7 @@ define <4 x i16> @test_bitcasti64tov4i16(i64 %in) {
 define <2 x i32> @test_bitcasti64tov2i32(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov2i32:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    bx lr
    %res = bitcast i64 %in to <2 x i32>
    ret <2 x i32> %res
@@ -1089,6 +967,7 @@ define <2 x i32> @test_bitcasti64tov2i32(i64 %in) {
 define <2 x float> @test_bitcasti64tov2f32(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov2f32:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    bx lr
    %res = bitcast i64 %in to <2 x float>
    ret <2 x float> %res
@@ -1097,6 +976,7 @@ define <2 x float> @test_bitcasti64tov2f32(i64 %in) {
 define <1 x i64> @test_bitcasti64tov1i64(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov1i64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    bx lr
    %res = bitcast i64 %in to <1 x i64>
    ret <1 x i64> %res
@@ -1105,6 +985,7 @@ define <1 x i64> @test_bitcasti64tov1i64(i64 %in) {
 define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
 ; CHECK-LABEL: test_bitcasti64tov1f64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    bx lr
    %res = bitcast i64 %in to <1 x double>
    ret <1 x double> %res
@@ -1113,11 +994,13 @@ define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
 define <1 x i64> @test_bitcastv8i8tov1f64(<8 x i8> %a) #0 {
 ; CHECK-LABEL: test_bitcastv8i8tov1f64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.s8 d16, d16
+; CHECK-NEXT:    vneg.s8 d16, d0
 ; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
 ; CHECK-NEXT:    pop {r11, pc}
   %sub.i = sub <8 x i8> zeroinitializer, %a
   %1 = bitcast <8 x i8> %sub.i to <1 x double>
@@ -1128,11 +1011,13 @@ define <1 x i64> @test_bitcastv8i8tov1f64(<8 x i8> %a) #0 {
 define <1 x i64> @test_bitcastv4i16tov1f64(<4 x i16> %a) #0 {
 ; CHECK-LABEL: test_bitcastv4i16tov1f64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.s16 d16, d16
+; CHECK-NEXT:    vneg.s16 d16, d0
 ; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
 ; CHECK-NEXT:    pop {r11, pc}
   %sub.i = sub <4 x i16> zeroinitializer, %a
   %1 = bitcast <4 x i16> %sub.i to <1 x double>
@@ -1143,11 +1028,13 @@ define <1 x i64> @test_bitcastv4i16tov1f64(<4 x i16> %a) #0 {
 define <1 x i64> @test_bitcastv2i32tov1f64(<2 x i32> %a) #0 {
 ; CHECK-LABEL: test_bitcastv2i32tov1f64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.s32 d16, d16
+; CHECK-NEXT:    vneg.s32 d16, d0
 ; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
 ; CHECK-NEXT:    pop {r11, pc}
   %sub.i = sub <2 x i32> zeroinitializer, %a
   %1 = bitcast <2 x i32> %sub.i to <1 x double>
@@ -1158,12 +1045,14 @@ define <1 x i64> @test_bitcastv2i32tov1f64(<2 x i32> %a) #0 {
 define <1 x i64> @test_bitcastv1i64tov1f64(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1i64tov1f64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
 ; CHECK-NEXT:    vmov.i32 d16, #0x0
-; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vsub.i64 d16, d16, d17
+; CHECK-NEXT:    vsub.i64 d16, d16, d0
 ; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
 ; CHECK-NEXT:    pop {r11, pc}
   %sub.i = sub <1 x i64> zeroinitializer, %a
   %1 = bitcast <1 x i64> %sub.i to <1 x double>
@@ -1174,11 +1063,13 @@ define <1 x i64> @test_bitcastv1i64tov1f64(<1 x i64> %a) #0 {
 define <1 x i64> @test_bitcastv2f32tov1f64(<2 x float> %a) #0 {
 ; CHECK-LABEL: test_bitcastv2f32tov1f64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.f32 d16, d16
+; CHECK-NEXT:    vneg.f32 d16, d0
 ; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    bl __fixdfdi
+; CHECK-NEXT:    bl __aeabi_d2lz
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
 ; CHECK-NEXT:    pop {r11, pc}
   %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %a
   %1 = bitcast <2 x float> %sub.i to <1 x double>
@@ -1189,14 +1080,13 @@ define <1 x i64> @test_bitcastv2f32tov1f64(<2 x float> %a) #0 {
 define <8 x i8> @test_bitcastv1f64tov8i8(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov8i8:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
+; CHECK-NEXT:    bl __aeabi_l2d
 ; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
-; CHECK-NEXT:    vmov.32 r1, d16[1]
-; CHECK-NEXT:    bl __floatdidf
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.s8 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vneg.s8 d0, d16
 ; CHECK-NEXT:    pop {r11, pc}
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <8 x i8>
@@ -1207,14 +1097,13 @@ define <8 x i8> @test_bitcastv1f64tov8i8(<1 x i64> %a) #0 {
 define <4 x i16> @test_bitcastv1f64tov4i16(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov4i16:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
+; CHECK-NEXT:    bl __aeabi_l2d
 ; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
-; CHECK-NEXT:    vmov.32 r1, d16[1]
-; CHECK-NEXT:    bl __floatdidf
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.s16 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vneg.s16 d0, d16
 ; CHECK-NEXT:    pop {r11, pc}
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <4 x i16>
@@ -1225,14 +1114,13 @@ define <4 x i16> @test_bitcastv1f64tov4i16(<1 x i64> %a) #0 {
 define <2 x i32> @test_bitcastv1f64tov2i32(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov2i32:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
+; CHECK-NEXT:    bl __aeabi_l2d
 ; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
-; CHECK-NEXT:    vmov.32 r1, d16[1]
-; CHECK-NEXT:    bl __floatdidf
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.s32 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vneg.s32 d0, d16
 ; CHECK-NEXT:    pop {r11, pc}
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <2 x i32>
@@ -1243,15 +1131,14 @@ define <2 x i32> @test_bitcastv1f64tov2i32(<1 x i64> %a) #0 {
 define <1 x i64> @test_bitcastv1f64tov1i64(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov1i64:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
-; CHECK-NEXT:    vmov.32 r1, d16[1]
-; CHECK-NEXT:    bl __floatdidf
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
+; CHECK-NEXT:    bl __aeabi_l2d
 ; CHECK-NEXT:    vmov.i32 d16, #0x0
 ; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vsub.i64 d16, d16, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vsub.i64 d0, d16, d17
 ; CHECK-NEXT:    pop {r11, pc}
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <1 x i64>
@@ -1262,14 +1149,13 @@ define <1 x i64> @test_bitcastv1f64tov1i64(<1 x i64> %a) #0 {
 define <2 x float> @test_bitcastv1f64tov2f32(<1 x i64> %a) #0 {
 ; CHECK-LABEL: test_bitcastv1f64tov2f32:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.32 r1, d0[1]
+; CHECK-NEXT:    bl __aeabi_l2d
 ; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
-; CHECK-NEXT:    vmov.32 r1, d16[1]
-; CHECK-NEXT:    bl __floatdidf
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vneg.f32 d16, d16
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vneg.f32 d0, d16
 ; CHECK-NEXT:    pop {r11, pc}
   %vcvt.i = sitofp <1 x i64> %a to <1 x double>
   %1 = bitcast <1 x double> %vcvt.i to <2 x float>
@@ -1278,42 +1164,75 @@ define <2 x float> @test_bitcastv1f64tov2f32(<1 x i64> %a) #0 {
 }
 
 ; Test insert element into an undef vector
-define <8 x i8> @scalar_to_vector.v8i8(i8 %a) {
+define <8 x i8> @scalar_to_vector_v8i8(i8 %a) {
+; CHECK-LABEL: scalar_to_vector_v8i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.8 d0[0], r0
+; CHECK-NEXT:    bx lr
   %b = insertelement <8 x i8> undef, i8 %a, i32 0
   ret <8 x i8> %b
 }
 
-define <16 x i8> @scalar_to_vector.v16i8(i8 %a) {
+define <16 x i8> @scalar_to_vector_v16i8(i8 %a) {
+; CHECK-LABEL: scalar_to_vector_v16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.8 d0[0], r0
+; CHECK-NEXT:    bx lr
   %b = insertelement <16 x i8> undef, i8 %a, i32 0
   ret <16 x i8> %b
 }
 
-define <4 x i16> @scalar_to_vector.v4i16(i16 %a) {
+define <4 x i16> @scalar_to_vector_v4i16(i16 %a) {
+; CHECK-LABEL: scalar_to_vector_v4i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.16 d0[0], r0
+; CHECK-NEXT:    bx lr
   %b = insertelement <4 x i16> undef, i16 %a, i32 0
   ret <4 x i16> %b
 }
 
-define <8 x i16> @scalar_to_vector.v8i16(i16 %a) {
+define <8 x i16> @scalar_to_vector_v8i16(i16 %a) {
+; CHECK-LABEL: scalar_to_vector_v8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.16 d0[0], r0
+; CHECK-NEXT:    bx lr
   %b = insertelement <8 x i16> undef, i16 %a, i32 0
   ret <8 x i16> %b
 }
 
-define <2 x i32> @scalar_to_vector.v2i32(i32 %a) {
+define <2 x i32> @scalar_to_vector_v2i32(i32 %a) {
+; CHECK-LABEL: scalar_to_vector_v2i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    bx lr
   %b = insertelement <2 x i32> undef, i32 %a, i32 0
   ret <2 x i32> %b
 }
 
-define <4 x i32> @scalar_to_vector.v4i32(i32 %a) {
+define <4 x i32> @scalar_to_vector_v4i32(i32 %a) {
+; CHECK-LABEL: scalar_to_vector_v4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    bx lr
   %b = insertelement <4 x i32> undef, i32 %a, i32 0
   ret <4 x i32> %b
 }
 
-define <2 x i64> @scalar_to_vector.v2i64(i64 %a) {
+define <2 x i64> @scalar_to_vector_v2i64(i64 %a) {
+; CHECK-LABEL: scalar_to_vector_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.32 d0[0], r0
+; CHECK-NEXT:    vmov.32 d0[1], r1
+; CHECK-NEXT:    bx lr
   %b = insertelement <2 x i64> undef, i64 %a, i32 0
   ret <2 x i64> %b
 }
 
-define <8 x i8> @testDUP.v1i8(<1 x i8> %a) {
+define <8 x i8> @testDUPv1i8(<1 x i8> %a) {
+; CHECK-LABEL: testDUPv1i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.8 d0, r0
+; CHECK-NEXT:    bx lr
   %b = extractelement <1 x i8> %a, i32 0
   %c = insertelement <8 x i8> undef, i8 %b, i32 0
   %d = insertelement <8 x i8> %c, i8 %b, i32 1
@@ -1326,7 +1245,11 @@ define <8 x i8> @testDUP.v1i8(<1 x i8> %a) {
   ret <8 x i8> %j
 }
 
-define <8 x i16> @testDUP.v1i16(<1 x i16> %a) {
+define <8 x i16> @testDUPv1i16(<1 x i16> %a) {
+; CHECK-LABEL: testDUPv1i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.16 q0, r0
+; CHECK-NEXT:    bx lr
   %b = extractelement <1 x i16> %a, i32 0
   %c = insertelement <8 x i16> undef, i16 %b, i32 0
   %d = insertelement <8 x i16> %c, i16 %b, i32 1
@@ -1339,7 +1262,11 @@ define <8 x i16> @testDUP.v1i16(<1 x i16> %a) {
   ret <8 x i16> %j
 }
 
-define <4 x i32> @testDUP.v1i32(<1 x i32> %a) {
+define <4 x i32> @testDUPv1i32(<1 x i32> %a) {
+; CHECK-LABEL: testDUPv1i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vdup.32 q0, r0
+; CHECK-NEXT:    bx lr
   %b = extractelement <1 x i32> %a, i32 0
   %c = insertelement <4 x i32> undef, i32 %b, i32 0
   %d = insertelement <4 x i32> %c, i32 %b, i32 1
@@ -1351,6 +1278,7 @@ define <4 x i32> @testDUP.v1i32(<1 x i32> %a) {
 define <8 x i8> @getl(<16 x i8> %x) #0 {
 ; CHECK-LABEL: getl:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0
 ; CHECK-NEXT:    bx lr
   %vecext = extractelement <16 x i8> %x, i32 0
   %vecinit = insertelement <8 x i8> undef, i8 %vecext, i32 0
@@ -1374,25 +1302,24 @@ define <8 x i8> @getl(<16 x i8> %x) #0 {
 define <4 x i16> @test_extracts_inserts_varidx_extract(<8 x i16> %x, i32 %idx) {
 ; CHECK-LABEL: test_extracts_inserts_varidx_extract:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11}
 ; CHECK-NEXT:    push {r11}
+; CHECK-NEXT:    .setfp r11, sp
 ; CHECK-NEXT:    mov r11, sp
+; CHECK-NEXT:    .pad #28
 ; CHECK-NEXT:    sub sp, sp, #28
 ; CHECK-NEXT:    bfc sp, #0, #4
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    ldr r3, [r11, #4]
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r2, sp
-; CHECK-NEXT:    and r3, r3, #7
-; CHECK-NEXT:    vmov.u16 r0, d16[1]
-; CHECK-NEXT:    vmov.u16 r1, d16[2]
-; CHECK-NEXT:    lsl r3, r3, #1
-; CHECK-NEXT:    vmov.u16 r12, d16[3]
-; CHECK-NEXT:    vst1.64 {d16, d17}, [r2:128], r3
-; CHECK-NEXT:    vld1.16 {d16[0]}, [r2:16]
-; CHECK-NEXT:    vmov.16 d16[1], r0
-; CHECK-NEXT:    vmov.16 d16[2], r1
-; CHECK-NEXT:    vmov.16 d16[3], r12
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.u16 r1, d0[1]
+; CHECK-NEXT:    and r0, r0, #7
+; CHECK-NEXT:    vmov.u16 r2, d0[2]
+; CHECK-NEXT:    mov r3, sp
+; CHECK-NEXT:    vmov.u16 r12, d0[3]
+; CHECK-NEXT:    lsl r0, r0, #1
+; CHECK-NEXT:    vst1.64 {d0, d1}, [r3:128], r0
+; CHECK-NEXT:    vld1.16 {d0[0]}, [r3:16]
+; CHECK-NEXT:    vmov.16 d0[1], r1
+; CHECK-NEXT:    vmov.16 d0[2], r2
+; CHECK-NEXT:    vmov.16 d0[3], r12
 ; CHECK-NEXT:    mov sp, r11
 ; CHECK-NEXT:    pop {r11}
 ; CHECK-NEXT:    bx lr
@@ -1410,21 +1337,19 @@ define <4 x i16> @test_extracts_inserts_varidx_extract(<8 x i16> %x, i32 %idx) {
 define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
 ; CHECK-LABEL: test_extracts_inserts_varidx_insert:
 ; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .pad #8
 ; CHECK-NEXT:    sub sp, sp, #8
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    ldr r3, [sp, #8]
-; CHECK-NEXT:    mov r2, sp
-; CHECK-NEXT:    vmov.u16 r0, d16[1]
-; CHECK-NEXT:    and r3, r3, #3
-; CHECK-NEXT:    vmov.u16 r1, d16[2]
-; CHECK-NEXT:    vmov.u16 r12, d16[3]
-; CHECK-NEXT:    orr r2, r2, r3, lsl #1
-; CHECK-NEXT:    vst1.16 {d16[0]}, [r2:16]
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    vmov.16 d16[1], r0
-; CHECK-NEXT:    vmov.16 d16[2], r1
-; CHECK-NEXT:    vmov.16 d16[3], r12
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.u16 r1, d0[1]
+; CHECK-NEXT:    and r0, r0, #3
+; CHECK-NEXT:    vmov.u16 r2, d0[2]
+; CHECK-NEXT:    mov r3, sp
+; CHECK-NEXT:    vmov.u16 r12, d0[3]
+; CHECK-NEXT:    orr r0, r3, r0, lsl #1
+; CHECK-NEXT:    vst1.16 {d0[0]}, [r0:16]
+; CHECK-NEXT:    vldr d0, [sp]
+; CHECK-NEXT:    vmov.16 d0[1], r1
+; CHECK-NEXT:    vmov.16 d0[2], r2
+; CHECK-NEXT:    vmov.16 d0[3], r12
 ; CHECK-NEXT:    add sp, sp, #8
 ; CHECK-NEXT:    bx lr
   %tmp = extractelement <8 x i16> %x, i32 0
@@ -1441,11 +1366,9 @@ define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
 define <4 x i16> @test_dup_v2i32_v4i16(<2 x i32> %a) {
 ; CHECK-LABEL: test_dup_v2i32_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[1]
+; CHECK-NEXT:    vmov.32 r0, d0[1]
 ; CHECK-NEXT:    vmov.16 d16[1], r0
-; CHECK-NEXT:    vdup.16 d16, d16[1]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.16 d0, d16[1]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <2 x i32> %a, i32 1
@@ -1460,9 +1383,9 @@ entry:
 define <8 x i16> @test_dup_v4i32_v8i16(<4 x i32> %a) {
 ; CHECK-LABEL: test_dup_v4i32_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.16 q8, r3
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.32 r0, d1[1]
+; CHECK-NEXT:    vmov.16 d16[3], r0
+; CHECK-NEXT:    vdup.16 q0, d16[3]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <4 x i32> %a, i32 3
@@ -1481,11 +1404,9 @@ entry:
 define <4 x i16> @test_dup_v1i64_v4i16(<1 x i64> %a) {
 ; CHECK-LABEL: test_dup_v1i64_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov.32 r0, d16[0]
+; CHECK-NEXT:    vmov.32 r0, d0[0]
 ; CHECK-NEXT:    vmov.16 d16[0], r0
-; CHECK-NEXT:    vdup.16 d16, d16[0]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.16 d0, d16[0]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <1 x i64> %a, i32 0
@@ -1500,9 +1421,7 @@ entry:
 define <2 x i32> @test_dup_v1i64_v2i32(<1 x i64> %a) {
 ; CHECK-LABEL: test_dup_v1i64_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.32 d16, d16[0]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.32 d0, d0[0]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <1 x i64> %a, i32 0
@@ -1515,9 +1434,9 @@ entry:
 define <8 x i16> @test_dup_v2i64_v8i16(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.16 q8, r2
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.32 r0, d1[0]
+; CHECK-NEXT:    vmov.16 d16[2], r0
+; CHECK-NEXT:    vdup.16 q0, d16[2]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <2 x i64> %a, i32 1
@@ -1536,9 +1455,7 @@ entry:
 define <4 x i32> @test_dup_v2i64_v4i32(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    mov r0, r2
-; CHECK-NEXT:    mov r1, r2
-; CHECK-NEXT:    mov r3, r2
+; CHECK-NEXT:    vdup.32 q0, d1[0]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <2 x i64> %a, i32 1
@@ -1553,8 +1470,9 @@ entry:
 define <4 x i16> @test_dup_v4i32_v4i16(<4 x i32> %a) {
 ; CHECK-LABEL: test_dup_v4i32_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.16 d16, r1
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.32 r0, d0[1]
+; CHECK-NEXT:    vmov.16 d16[1], r0
+; CHECK-NEXT:    vdup.16 d0, d16[1]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <4 x i32> %a, i32 1
@@ -1569,8 +1487,9 @@ entry:
 define <4 x i16> @test_dup_v2i64_v4i16(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.16 d16, r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.32 r0, d0[0]
+; CHECK-NEXT:    vmov.16 d16[0], r0
+; CHECK-NEXT:    vdup.16 d0, d16[0]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <2 x i64> %a, i32 0
@@ -1585,8 +1504,7 @@ entry:
 define <2 x i32> @test_dup_v2i64_v2i32(<2 x i64> %a) {
 ; CHECK-LABEL: test_dup_v2i64_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.32 d16, r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.32 d0, d0[0]
 ; CHECK-NEXT:    bx lr
 entry:
   %x = extractelement <2 x i64> %a, i32 0
@@ -1599,9 +1517,7 @@ entry:
 define <2 x i32> @test_concat_undef_v1i32(<2 x i32> %a) {
 ; CHECK-LABEL: test_concat_undef_v1i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.32 d16, d16[0]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.32 d0, d0[0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = extractelement <2 x i32> %a, i32 0
@@ -1612,9 +1528,7 @@ entry:
 define <2 x i32> @test_concat_same_v1i32_v1i32(<2 x i32> %a) {
 ; CHECK-LABEL: test_concat_same_v1i32_v1i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vdup.32 d16, d16[0]
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.32 d0, d0[0]
 ; CHECK-NEXT:    bx lr
 entry:
   %0 = extractelement <2 x i32> %a, i32 0
@@ -1627,10 +1541,7 @@ entry:
 define <16 x i8> @test_concat_v16i8_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v16i8_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecinit30 = shufflevector <16 x i8> %x, <16 x i8> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
@@ -1640,10 +1551,8 @@ entry:
 define <16 x i8> @test_concat_v16i8_v8i8_v16i8(<8 x i8> %x, <16 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v8i8_v16i8:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <8 x i8> %x, i32 0
@@ -1669,10 +1578,7 @@ entry:
 define <16 x i8> @test_concat_v16i8_v16i8_v8i8(<16 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v16i8_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <16 x i8> %x, i32 0
@@ -1713,10 +1619,8 @@ entry:
 define <16 x i8> @test_concat_v16i8_v8i8_v8i8(<8 x i8> %x, <8 x i8> %y) #0 {
 ; CHECK-LABEL: test_concat_v16i8_v8i8_v8i8:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d1 killed $d1 killed $q0 def $q0
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0 def $q0
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <8 x i8> %x, i32 0
@@ -1757,10 +1661,7 @@ entry:
 define <8 x i16> @test_concat_v8i16_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v8i16_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecinit14 = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -1770,10 +1671,8 @@ entry:
 define <8 x i16> @test_concat_v8i16_v4i16_v8i16(<4 x i16> %x, <8 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v4i16_v8i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <4 x i16> %x, i32 0
@@ -1791,10 +1690,7 @@ entry:
 define <8 x i16> @test_concat_v8i16_v8i16_v4i16(<8 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v8i16_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <8 x i16> %x, i32 0
@@ -1819,10 +1715,8 @@ entry:
 define <8 x i16> @test_concat_v8i16_v4i16_v4i16(<4 x i16> %x, <4 x i16> %y) #0 {
 ; CHECK-LABEL: test_concat_v8i16_v4i16_v4i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d1 killed $d1 killed $q0 def $q0
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0 def $q0
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <4 x i16> %x, i32 0
@@ -1847,13 +1741,8 @@ entry:
 define <4 x i32> @test_concat_v4i32_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v4i32_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
-; CHECK-NEXT:    vext.32 q8, q8, q8, #2
-; CHECK-NEXT:    vext.32 q8, q8, q9, #2
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vext.32 q8, q0, q0, #2
+; CHECK-NEXT:    vext.32 q0, q8, q1, #2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecinit6 = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
@@ -1863,12 +1752,9 @@ entry:
 define <4 x i32> @test_concat_v4i32_v2i32_v4i32(<2 x i32> %x, <4 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v2i32_v4i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov d18, r2, r3
-; CHECK-NEXT:    vext.32 q8, q8, q8, #2
-; CHECK-NEXT:    vext.32 q8, q8, q9, #2
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vext.32 q8, q0, q0, #2
+; CHECK-NEXT:    vext.32 q0, q8, q1, #2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <2 x i32> %x, i32 0
@@ -1882,9 +1768,7 @@ entry:
 define <4 x i32> @test_concat_v4i32_v4i32_v2i32(<4 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v4i32_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    vmov.32 r2, d16[0]
-; CHECK-NEXT:    vmov.32 r3, d16[1]
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <4 x i32> %x, i32 0
@@ -1901,10 +1785,8 @@ entry:
 define <4 x i32> @test_concat_v4i32_v2i32_v2i32(<2 x i32> %x, <2 x i32> %y) #0 {
 ; CHECK-LABEL: test_concat_v4i32_v2i32_v2i32:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d1 killed $d1 killed $q0 def $q0
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0 def $q0
 ; CHECK-NEXT:    bx lr
 entry:
   %vecinit6 = shufflevector <2 x i32> %x, <2 x i32> %y, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1914,8 +1796,7 @@ entry:
 define <2 x i64> @test_concat_v2i64_v2i64_v2i64(<2 x i64> %x, <2 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v2i64_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldr d16, [sp]
-; CHECK-NEXT:    vmov r2, r3, d16
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecinit2 = shufflevector <2 x i64> %x, <2 x i64> %y, <2 x i32> <i32 0, i32 2>
@@ -1925,6 +1806,8 @@ entry:
 define <2 x i64> @test_concat_v2i64_v1i64_v2i64(<1 x i64> %x, <2 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v1i64_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <1 x i64> %x, i32 0
@@ -1936,10 +1819,7 @@ entry:
 define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v2i64_v1i64:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vldr d17, [sp]
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.f64 d1, d2
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <2 x i64> %x, i32 0
@@ -1952,10 +1832,8 @@ entry:
 define <2 x i64> @test_concat_v2i64_v1i64_v1i64(<1 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v1i64_v1i64:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov d17, r2, r3
-; CHECK-NEXT:    vmov d16, r0, r1
-; CHECK-NEXT:    vmov r2, r3, d17
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    @ kill: def $d1 killed $d1 killed $q0 def $q0
+; CHECK-NEXT:    @ kill: def $d0 killed $d0 killed $q0 def $q0
 ; CHECK-NEXT:    bx lr
 entry:
   %vecext = extractelement <1 x i64> %x, i32 0
@@ -1969,8 +1847,7 @@ entry:
 define <4 x i16> @concat_vector_v4i16_const() {
 ; CHECK-LABEL: concat_vector_v4i16_const:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov.i32 d16, #0x0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.i32 d0, #0x0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <4 x i32> zeroinitializer
  ret <4 x i16> %r
@@ -1979,8 +1856,7 @@ define <4 x i16> @concat_vector_v4i16_const() {
 define <4 x i16> @concat_vector_v4i16_const_one() {
 ; CHECK-LABEL: concat_vector_v4i16_const_one:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov.i16 d16, #0x1
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.i16 d0, #0x1
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <4 x i32> zeroinitializer
  ret <4 x i16> %r
@@ -1989,10 +1865,7 @@ define <4 x i16> @concat_vector_v4i16_const_one() {
 define <4 x i32> @concat_vector_v4i32_const() {
 ; CHECK-LABEL: concat_vector_v4i32_const:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r0, #0
-; CHECK-NEXT:    mov r1, #0
-; CHECK-NEXT:    mov r2, #0
-; CHECK-NEXT:    mov r3, #0
+; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i32> zeroinitializer, <1 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %r
@@ -2001,8 +1874,7 @@ define <4 x i32> @concat_vector_v4i32_const() {
 define <8 x i8> @concat_vector_v8i8_const() {
 ; CHECK-LABEL: concat_vector_v8i8_const:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov.i32 d16, #0x0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vmov.i32 d0, #0x0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <8 x i32> zeroinitializer
  ret <8 x i8> %r
@@ -2011,9 +1883,7 @@ define <8 x i8> @concat_vector_v8i8_const() {
 define <8 x i16> @concat_vector_v8i16_const() {
 ; CHECK-LABEL: concat_vector_v8i16_const:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov.i32 q8, #0x0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i16> zeroinitializer, <1 x i16> undef, <8 x i32> zeroinitializer
  ret <8 x i16> %r
@@ -2022,9 +1892,7 @@ define <8 x i16> @concat_vector_v8i16_const() {
 define <8 x i16> @concat_vector_v8i16_const_one() {
 ; CHECK-LABEL: concat_vector_v8i16_const_one:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov.i16 q8, #0x1
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.i16 q0, #0x1
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i16> <i16 1>, <1 x i16> undef, <8 x i32> zeroinitializer
  ret <8 x i16> %r
@@ -2033,9 +1901,7 @@ define <8 x i16> @concat_vector_v8i16_const_one() {
 define <16 x i8> @concat_vector_v16i8_const() {
 ; CHECK-LABEL: concat_vector_v16i8_const:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov.i32 q8, #0x0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vmov.i32 q0, #0x0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i8> zeroinitializer, <1 x i8> undef, <16 x i32> zeroinitializer
  ret <16 x i8> %r
@@ -2044,8 +1910,7 @@ define <16 x i8> @concat_vector_v16i8_const() {
 define <4 x i16> @concat_vector_v4i16(<1 x i16> %a) {
 ; CHECK-LABEL: concat_vector_v4i16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.16 d16, r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.16 d0, r0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i16> %a, <1 x i16> undef, <4 x i32> zeroinitializer
  ret <4 x i16> %r
@@ -2054,9 +1919,7 @@ define <4 x i16> @concat_vector_v4i16(<1 x i16> %a) {
 define <4 x i32> @concat_vector_v4i32(<1 x i32> %a) {
 ; CHECK-LABEL: concat_vector_v4i32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    mov r1, r0
-; CHECK-NEXT:    mov r2, r0
-; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    vdup.32 q0, r0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i32> %a, <1 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %r
@@ -2065,8 +1928,7 @@ define <4 x i32> @concat_vector_v4i32(<1 x i32> %a) {
 define <8 x i8> @concat_vector_v8i8(<1 x i8> %a) {
 ; CHECK-LABEL: concat_vector_v8i8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.8 d16, r0
-; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    vdup.8 d0, r0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i8> %a, <1 x i8> undef, <8 x i32> zeroinitializer
  ret <8 x i8> %r
@@ -2075,9 +1937,7 @@ define <8 x i8> @concat_vector_v8i8(<1 x i8> %a) {
 define <8 x i16> @concat_vector_v8i16(<1 x i16> %a) {
 ; CHECK-LABEL: concat_vector_v8i16:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.16 q8, r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vdup.16 q0, r0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i16> %a, <1 x i16> undef, <8 x i32> zeroinitializer
  ret <8 x i16> %r
@@ -2086,9 +1946,7 @@ define <8 x i16> @concat_vector_v8i16(<1 x i16> %a) {
 define <16 x i8> @concat_vector_v16i8(<1 x i8> %a) {
 ; CHECK-LABEL: concat_vector_v16i8:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vdup.8 q8, r0
-; CHECK-NEXT:    vmov r0, r1, d16
-; CHECK-NEXT:    vmov r2, r3, d17
+; CHECK-NEXT:    vdup.8 q0, r0
 ; CHECK-NEXT:    bx lr
  %r = shufflevector <1 x i8> %a, <1 x i8> undef, <16 x i32> zeroinitializer
  ret <16 x i8> %r


        


More information about the llvm-commits mailing list