[llvm] 371ee32 - [ARM] Fold extract of ARM_BUILD_VECTOR

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 29 03:03:33 PDT 2021


Author: David Green
Date: 2021-06-29T11:03:19+01:00
New Revision: 371ee32e01a788a6dfc62cb7b10a94b80fe28425

URL: https://github.com/llvm/llvm-project/commit/371ee32e01a788a6dfc62cb7b10a94b80fe28425
DIFF: https://github.com/llvm/llvm-project/commit/371ee32e01a788a6dfc62cb7b10a94b80fe28425.diff

LOG: [ARM] Fold extract of ARM_BUILD_VECTOR

This adds a small fold for extract (ARM_BUILD_VECTOR), folding the
extract to the corresponding operand of the original build vector. This
can help simplify the resulting codegen in some cases.

Differential Revision: https://reviews.llvm.org/D104860

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
    llvm/test/CodeGen/ARM/big-endian-vector-caller.ll
    llvm/test/CodeGen/Thumb2/mve-shuffle.ll
    llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
    llvm/test/CodeGen/Thumb2/mve-vld3.ll
    llvm/test/CodeGen/Thumb2/mve-vld4.ll
    llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll
    llvm/test/CodeGen/Thumb2/mve-vst3.ll
    llvm/test/CodeGen/Thumb2/mve-vst4.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 0bd4306309f28..43b8cec412f85 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -14659,6 +14659,13 @@ static SDValue PerformExtractEltCombine(SDNode *N,
       return X;
   }
 
+  // extract ARM_BUILD_VECTOR -> x
+  if (Op0->getOpcode() == ARMISD::BUILD_VECTOR &&
+      isa<ConstantSDNode>(N->getOperand(1)) &&
+      N->getConstantOperandVal(1) < Op0.getNumOperands()) {
+    return Op0.getOperand(N->getConstantOperandVal(1));
+  }
+
   // extract(bitcast(BUILD_VECTOR(VMOVDRR(a, b), ..))) -> a or b
   if (Op0.getValueType() == MVT::v4i32 &&
       isa<ConstantSDNode>(N->getOperand(1)) &&

diff --git a/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll b/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
index c0c9d71e197f9..cb1da99d3fc37 100644
--- a/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
+++ b/llvm/test/CodeGen/ARM/big-endian-vector-callee.ll
@@ -1290,10 +1290,10 @@ define <2 x double> @test_v2f64_f128(fp128 %p) {
 ; SOFT-NEXT:    vmov.32 d17[1], r3
 ; SOFT-NEXT:    vmov.32 d16[1], r1
 ; SOFT-NEXT:    vrev64.32 q8, q8
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    add sp, sp, #16
 ; SOFT-NEXT:    pop {r11, pc}
 ;
@@ -1326,10 +1326,10 @@ define <2 x double> @test_v2f64_v2i64(<2 x i64> %p) {
 ; SOFT-NEXT:    vmov d17, r3, r2
 ; SOFT-NEXT:    vmov d16, r1, r0
 ; SOFT-NEXT:    vadd.i64 q8, q8, q8
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bx lr
 ;
 ; HARD-LABEL: test_v2f64_v2i64:
@@ -1352,10 +1352,10 @@ define <2 x double> @test_v2f64_v4f32(<4 x float> %p) {
 ; SOFT-NEXT:    vrev64.32 q8, q8
 ; SOFT-NEXT:    vadd.f32 q8, q8, q8
 ; SOFT-NEXT:    vrev64.32 q8, q8
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bx lr
 ;
 ; HARD-LABEL: test_v2f64_v4f32:
@@ -1380,10 +1380,10 @@ define <2 x double> @test_v2f64_v4i32(<4 x i32> %p) {
 ; SOFT-NEXT:    vrev64.32 q8, q8
 ; SOFT-NEXT:    vadd.i32 q8, q8, q8
 ; SOFT-NEXT:    vrev64.32 q8, q8
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bx lr
 ;
 ; HARD-LABEL: test_v2f64_v4i32:
@@ -1408,10 +1408,10 @@ define <2 x double> @test_v2f64_v8i16(<8 x i16> %p) {
 ; SOFT-NEXT:    vrev64.16 q8, q8
 ; SOFT-NEXT:    vadd.i16 q8, q8, q8
 ; SOFT-NEXT:    vrev64.16 q8, q8
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bx lr
 ;
 ; HARD-LABEL: test_v2f64_v8i16:
@@ -1436,10 +1436,10 @@ define <2 x double> @test_v2f64_v16i8(<16 x i8> %p) {
 ; SOFT-NEXT:    vrev64.8 q8, q8
 ; SOFT-NEXT:    vadd.i8 q8, q8, q8
 ; SOFT-NEXT:    vrev64.8 q8, q8
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bx lr
 ;
 ; HARD-LABEL: test_v2f64_v16i8:

diff --git a/llvm/test/CodeGen/ARM/big-endian-vector-caller.ll b/llvm/test/CodeGen/ARM/big-endian-vector-caller.ll
index bf4e8a918b2df..7aaf4ae0bfb53 100644
--- a/llvm/test/CodeGen/ARM/big-endian-vector-caller.ll
+++ b/llvm/test/CodeGen/ARM/big-endian-vector-caller.ll
@@ -1686,10 +1686,10 @@ define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) {
 ; SOFT-NEXT:    sub sp, sp, #16
 ; SOFT-NEXT:    vld1.64 {d16, d17}, [r0]
 ; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bl test_f128_v2f64_helper
 ; SOFT-NEXT:    stm sp, {r0, r1, r2, r3}
 ; SOFT-NEXT:    bl __addtf3
@@ -2272,10 +2272,10 @@ define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
 ; SOFT-NEXT:    push {r4, lr}
 ; SOFT-NEXT:    vld1.64 {d16, d17}, [r0]
 ; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bl test_v2i64_v2f64_helper
 ; SOFT-NEXT:    vmov d17, r3, r2
 ; SOFT-NEXT:    vmov d16, r1, r0
@@ -2528,10 +2528,10 @@ define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) {
 ; SOFT-NEXT:    push {r4, lr}
 ; SOFT-NEXT:    vld1.64 {d16, d17}, [r0]
 ; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bl test_v4f32_v2f64_helper
 ; SOFT-NEXT:    vmov d17, r3, r2
 ; SOFT-NEXT:    vmov d16, r1, r0
@@ -2800,10 +2800,10 @@ define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) {
 ; SOFT-NEXT:    push {r4, lr}
 ; SOFT-NEXT:    vld1.64 {d16, d17}, [r0]
 ; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bl test_v4i32_v2f64_helper
 ; SOFT-NEXT:    vmov d17, r3, r2
 ; SOFT-NEXT:    vmov d16, r1, r0
@@ -3072,10 +3072,10 @@ define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) {
 ; SOFT-NEXT:    push {r4, lr}
 ; SOFT-NEXT:    vld1.64 {d16, d17}, [r0]
 ; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bl test_v8i16_v2f64_helper
 ; SOFT-NEXT:    vmov d17, r3, r2
 ; SOFT-NEXT:    vmov d16, r1, r0
@@ -3344,10 +3344,10 @@ define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) {
 ; SOFT-NEXT:    push {r4, lr}
 ; SOFT-NEXT:    vld1.64 {d16, d17}, [r0]
 ; SOFT-NEXT:    mov r4, r1
-; SOFT-NEXT:    vadd.f64 d19, d17, d17
 ; SOFT-NEXT:    vadd.f64 d18, d16, d16
+; SOFT-NEXT:    vadd.f64 d16, d17, d17
 ; SOFT-NEXT:    vmov r1, r0, d18
-; SOFT-NEXT:    vmov r3, r2, d19
+; SOFT-NEXT:    vmov r3, r2, d16
 ; SOFT-NEXT:    bl test_v16i8_v2f64_helper
 ; SOFT-NEXT:    vmov d17, r3, r2
 ; SOFT-NEXT:    vmov d16, r1, r0

diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index 46c3cebb76860..b66e7b24536cf 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -164,23 +164,22 @@ define arm_aapcs_vfpcc <4 x i32> @shuffle4step_i32(<16 x i32> %src) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vmov.f32 s18, s11
+; CHECK-NEXT:    vmov.f32 s16, s3
 ; CHECK-NEXT:    vmov.f32 s20, s2
-; CHECK-NEXT:    vmov.f32 s19, s15
+; CHECK-NEXT:    vmov.f32 s17, s7
 ; CHECK-NEXT:    vmov.f32 s21, s6
-; CHECK-NEXT:    vmov.f32 s16, s3
-; CHECK-NEXT:    vmov.f32 s11, s14
+; CHECK-NEXT:    vmov.f32 s18, s11
 ; CHECK-NEXT:    vmov.f32 s22, s10
-; CHECK-NEXT:    vmov.f32 s17, s7
+; CHECK-NEXT:    vmov.f32 s19, s15
 ; CHECK-NEXT:    vmov.f32 s23, s14
 ; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s22, s9
-; CHECK-NEXT:    vmov.f32 s23, s13
 ; CHECK-NEXT:    vmov.f32 s20, s1
-; CHECK-NEXT:    vmov.f32 s2, s8
-; CHECK-NEXT:    vmov.f32 s3, s12
 ; CHECK-NEXT:    vmov.f32 s21, s5
 ; CHECK-NEXT:    vmov.f32 s1, s4
+; CHECK-NEXT:    vmov.f32 s22, s9
+; CHECK-NEXT:    vmov.f32 s2, s8
+; CHECK-NEXT:    vmov.f32 s23, s13
+; CHECK-NEXT:    vmov.f32 s3, s12
 ; CHECK-NEXT:    vadd.i32 q0, q0, q5
 ; CHECK-NEXT:    vadd.i32 q0, q0, q4
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
@@ -359,36 +358,36 @@ entry:
 define arm_aapcs_vfpcc <8 x i16> @shuffle3step_i16(<32 x i16> %src) {
 ; CHECK-LABEL: shuffle3step_i16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vmovx.f16 s16, s2
-; CHECK-NEXT:    vmov.f32 s12, s1
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vmovx.f16 s16, s1
+; CHECK-NEXT:    vmov.f32 s12, s0
 ; CHECK-NEXT:    vins.f16 s12, s16
-; CHECK-NEXT:    vmovx.f16 s16, s5
-; CHECK-NEXT:    vmov.f32 s13, s4
-; CHECK-NEXT:    vmovx.f16 s20, s11
+; CHECK-NEXT:    vmovx.f16 s16, s4
+; CHECK-NEXT:    vmov.f32 s13, s3
+; CHECK-NEXT:    vmovx.f16 s20, s5
 ; CHECK-NEXT:    vins.f16 s13, s16
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vins.f16 s19, s20
-; CHECK-NEXT:    vmov.f32 s14, s7
-; CHECK-NEXT:    vmov.f32 s18, s8
+; CHECK-NEXT:    vmovx.f16 s16, s7
+; CHECK-NEXT:    vmov.f32 s14, s6
 ; CHECK-NEXT:    vmov.u16 r0, q1[5]
-; CHECK-NEXT:    vmov q5, q4
-; CHECK-NEXT:    vmovnb.i32 q5, q3
-; CHECK-NEXT:    vmov.f32 s14, s22
-; CHECK-NEXT:    vmovx.f16 s20, s1
-; CHECK-NEXT:    vmov.f32 s15, s19
-; CHECK-NEXT:    vmov.f32 s16, s0
-; CHECK-NEXT:    vins.f16 s16, s20
-; CHECK-NEXT:    vmovx.f16 s20, s4
-; CHECK-NEXT:    vmov.f32 s17, s3
+; CHECK-NEXT:    vins.f16 s14, s16
+; CHECK-NEXT:    vmovx.f16 s16, s2
+; CHECK-NEXT:    vins.f16 s1, s16
+; CHECK-NEXT:    vmov.f32 s17, s4
 ; CHECK-NEXT:    vins.f16 s17, s20
-; CHECK-NEXT:    vmovx.f16 s20, s7
-; CHECK-NEXT:    vmov.f32 s18, s6
-; CHECK-NEXT:    vins.f16 s18, s20
 ; CHECK-NEXT:    vmovx.f16 s20, s10
-; CHECK-NEXT:    vmov.f32 s19, s9
-; CHECK-NEXT:    vins.f16 s19, s20
+; CHECK-NEXT:    vmov.f32 s15, s9
+; CHECK-NEXT:    vins.f16 s15, s20
+; CHECK-NEXT:    vmovx.f16 s20, s11
+; CHECK-NEXT:    vins.f16 s10, s20
+; CHECK-NEXT:    vmov.f32 s16, s1
+; CHECK-NEXT:    vmov.f32 s23, s10
+; CHECK-NEXT:    vmov.f32 s22, s8
+; CHECK-NEXT:    vmov.f32 s18, s7
+; CHECK-NEXT:    vmov q6, q5
+; CHECK-NEXT:    vmovnb.i32 q6, q4
+; CHECK-NEXT:    vmov.f32 s18, s26
+; CHECK-NEXT:    vmov.f32 s19, s23
 ; CHECK-NEXT:    vmovx.f16 s20, s0
 ; CHECK-NEXT:    vins.f16 s20, s2
 ; CHECK-NEXT:    vmovx.f16 s21, s3
@@ -401,9 +400,9 @@ define arm_aapcs_vfpcc <8 x i16> @shuffle3step_i16(<32 x i16> %src) {
 ; CHECK-NEXT:    vmovnb.i32 q1, q5
 ; CHECK-NEXT:    vmov.f32 s22, s6
 ; CHECK-NEXT:    vmov.f32 s23, s3
-; CHECK-NEXT:    vadd.i16 q0, q4, q5
-; CHECK-NEXT:    vadd.i16 q0, q0, q3
-; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    vadd.i16 q0, q3, q5
+; CHECK-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:
   %s1 = shufflevector <32 x i16> %src, <32 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
@@ -1193,23 +1192,22 @@ define arm_aapcs_vfpcc <4 x float> @shuffle4step_f32(<16 x float> %src) {
 ; CHECKFP:       @ %bb.0: @ %entry
 ; CHECKFP-NEXT:    .vsave {d8, d9, d10, d11}
 ; CHECKFP-NEXT:    vpush {d8, d9, d10, d11}
-; CHECKFP-NEXT:    vmov.f32 s18, s11
+; CHECKFP-NEXT:    vmov.f32 s16, s3
 ; CHECKFP-NEXT:    vmov.f32 s20, s2
-; CHECKFP-NEXT:    vmov.f32 s19, s15
+; CHECKFP-NEXT:    vmov.f32 s17, s7
 ; CHECKFP-NEXT:    vmov.f32 s21, s6
-; CHECKFP-NEXT:    vmov.f32 s16, s3
-; CHECKFP-NEXT:    vmov.f32 s11, s14
+; CHECKFP-NEXT:    vmov.f32 s18, s11
 ; CHECKFP-NEXT:    vmov.f32 s22, s10
-; CHECKFP-NEXT:    vmov.f32 s17, s7
+; CHECKFP-NEXT:    vmov.f32 s19, s15
 ; CHECKFP-NEXT:    vmov.f32 s23, s14
 ; CHECKFP-NEXT:    vadd.f32 q4, q5, q4
-; CHECKFP-NEXT:    vmov.f32 s22, s9
-; CHECKFP-NEXT:    vmov.f32 s23, s13
 ; CHECKFP-NEXT:    vmov.f32 s20, s1
-; CHECKFP-NEXT:    vmov.f32 s2, s8
-; CHECKFP-NEXT:    vmov.f32 s3, s12
 ; CHECKFP-NEXT:    vmov.f32 s21, s5
 ; CHECKFP-NEXT:    vmov.f32 s1, s4
+; CHECKFP-NEXT:    vmov.f32 s22, s9
+; CHECKFP-NEXT:    vmov.f32 s2, s8
+; CHECKFP-NEXT:    vmov.f32 s23, s13
+; CHECKFP-NEXT:    vmov.f32 s3, s12
 ; CHECKFP-NEXT:    vadd.f32 q0, q0, q5
 ; CHECKFP-NEXT:    vadd.f32 q0, q0, q4
 ; CHECKFP-NEXT:    vpop {d8, d9, d10, d11}

diff --git a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
index 1fdb6d84e9ca8..488a20bc9602f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
@@ -348,24 +348,27 @@ entry:
 define <4 x float> @vector_add_f32(<4 x float> %lhs, <4 x float> %rhs) {
 ; CHECK-MVE-LABEL: vector_add_f32:
 ; CHECK-MVE:       @ %bb.0: @ %entry
-; CHECK-MVE-NEXT:    .save {r4, r5, r7, lr}
-; CHECK-MVE-NEXT:    push {r4, r5, r7, lr}
-; CHECK-MVE-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
-; CHECK-MVE-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-MVE-NEXT:    vmov d13, r2, r3
-; CHECK-MVE-NEXT:    vmov d12, r0, r1
-; CHECK-MVE-NEXT:    add r1, sp, #64
-; CHECK-MVE-NEXT:    vldrw.u32 q5, [r1]
-; CHECK-MVE-NEXT:    vmov r4, r0, d13
-; CHECK-MVE-NEXT:    vmov r5, r1, d11
+; CHECK-MVE-NEXT:    .save {r4, r5, r6, r7, lr}
+; CHECK-MVE-NEXT:    push {r4, r5, r6, r7, lr}
+; CHECK-MVE-NEXT:    .pad #4
+; CHECK-MVE-NEXT:    sub sp, #4
+; CHECK-MVE-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-MVE-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-MVE-NEXT:    mov r4, r0
+; CHECK-MVE-NEXT:    add r0, sp, #56
+; CHECK-MVE-NEXT:    vldrw.u32 q5, [r0]
+; CHECK-MVE-NEXT:    mov r6, r1
+; CHECK-MVE-NEXT:    mov r0, r3
+; CHECK-MVE-NEXT:    mov r5, r2
+; CHECK-MVE-NEXT:    vmov r7, r1, d11
 ; CHECK-MVE-NEXT:    bl __aeabi_fadd
 ; CHECK-MVE-NEXT:    vmov s19, r0
-; CHECK-MVE-NEXT:    mov r0, r4
-; CHECK-MVE-NEXT:    mov r1, r5
+; CHECK-MVE-NEXT:    mov r0, r5
+; CHECK-MVE-NEXT:    mov r1, r7
 ; CHECK-MVE-NEXT:    bl __aeabi_fadd
-; CHECK-MVE-NEXT:    vmov s18, r0
-; CHECK-MVE-NEXT:    vmov r4, r0, d12
 ; CHECK-MVE-NEXT:    vmov r5, r1, d10
+; CHECK-MVE-NEXT:    vmov s18, r0
+; CHECK-MVE-NEXT:    mov r0, r6
 ; CHECK-MVE-NEXT:    bl __aeabi_fadd
 ; CHECK-MVE-NEXT:    vmov s17, r0
 ; CHECK-MVE-NEXT:    mov r0, r4
@@ -374,8 +377,9 @@ define <4 x float> @vector_add_f32(<4 x float> %lhs, <4 x float> %rhs) {
 ; CHECK-MVE-NEXT:    vmov s16, r0
 ; CHECK-MVE-NEXT:    vmov r2, r3, d9
 ; CHECK-MVE-NEXT:    vmov r0, r1, d8
-; CHECK-MVE-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
-; CHECK-MVE-NEXT:    pop {r4, r5, r7, pc}
+; CHECK-MVE-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-MVE-NEXT:    add sp, #4
+; CHECK-MVE-NEXT:    pop {r4, r5, r6, r7, pc}
 ;
 ; CHECK-BE-LABEL: vector_add_f32:
 ; CHECK-BE:       @ %bb.0: @ %entry
@@ -432,10 +436,10 @@ define <2 x double> @vector_add_f64(<2 x double> %lhs, <2 x double> %rhs) {
 ; CHECK-MVE-NEXT:    push {r4, r5, r6, r7, lr}
 ; CHECK-MVE-NEXT:    .pad #4
 ; CHECK-MVE-NEXT:    sub sp, #4
-; CHECK-MVE-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-MVE-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-MVE-NEXT:    .vsave {d8, d9}
+; CHECK-MVE-NEXT:    vpush {d8, d9}
 ; CHECK-MVE-NEXT:    mov r5, r0
-; CHECK-MVE-NEXT:    add r0, sp, #56
+; CHECK-MVE-NEXT:    add r0, sp, #40
 ; CHECK-MVE-NEXT:    vldrw.u32 q4, [r0]
 ; CHECK-MVE-NEXT:    mov r4, r2
 ; CHECK-MVE-NEXT:    mov r6, r3
@@ -445,14 +449,14 @@ define <2 x double> @vector_add_f64(<2 x double> %lhs, <2 x double> %rhs) {
 ; CHECK-MVE-NEXT:    mov r1, r6
 ; CHECK-MVE-NEXT:    bl __aeabi_dadd
 ; CHECK-MVE-NEXT:    vmov r2, r3, d8
-; CHECK-MVE-NEXT:    vmov d11, r0, r1
+; CHECK-MVE-NEXT:    mov r4, r0
+; CHECK-MVE-NEXT:    mov r6, r1
 ; CHECK-MVE-NEXT:    mov r0, r5
 ; CHECK-MVE-NEXT:    mov r1, r7
 ; CHECK-MVE-NEXT:    bl __aeabi_dadd
-; CHECK-MVE-NEXT:    vmov d10, r0, r1
-; CHECK-MVE-NEXT:    vmov r2, r3, d11
-; CHECK-MVE-NEXT:    vmov r0, r1, d10
-; CHECK-MVE-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-MVE-NEXT:    mov r2, r4
+; CHECK-MVE-NEXT:    mov r3, r6
+; CHECK-MVE-NEXT:    vpop {d8, d9}
 ; CHECK-MVE-NEXT:    add sp, #4
 ; CHECK-MVE-NEXT:    pop {r4, r5, r6, r7, pc}
 ;
@@ -462,10 +466,10 @@ define <2 x double> @vector_add_f64(<2 x double> %lhs, <2 x double> %rhs) {
 ; CHECK-BE-NEXT:    push {r4, r5, r6, r7, lr}
 ; CHECK-BE-NEXT:    .pad #4
 ; CHECK-BE-NEXT:    sub sp, #4
-; CHECK-BE-NEXT:    .vsave {d8, d9, d10, d11}
-; CHECK-BE-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-BE-NEXT:    .vsave {d8, d9}
+; CHECK-BE-NEXT:    vpush {d8, d9}
 ; CHECK-BE-NEXT:    mov r5, r0
-; CHECK-BE-NEXT:    add r0, sp, #56
+; CHECK-BE-NEXT:    add r0, sp, #40
 ; CHECK-BE-NEXT:    vldrb.u8 q0, [r0]
 ; CHECK-BE-NEXT:    mov r6, r2
 ; CHECK-BE-NEXT:    mov r4, r3
@@ -476,14 +480,14 @@ define <2 x double> @vector_add_f64(<2 x double> %lhs, <2 x double> %rhs) {
 ; CHECK-BE-NEXT:    mov r1, r4
 ; CHECK-BE-NEXT:    bl __aeabi_dadd
 ; CHECK-BE-NEXT:    vmov r3, r2, d8
-; CHECK-BE-NEXT:    vmov d11, r1, r0
+; CHECK-BE-NEXT:    mov r4, r0
+; CHECK-BE-NEXT:    mov r6, r1
 ; CHECK-BE-NEXT:    mov r0, r5
 ; CHECK-BE-NEXT:    mov r1, r7
 ; CHECK-BE-NEXT:    bl __aeabi_dadd
-; CHECK-BE-NEXT:    vmov d10, r1, r0
-; CHECK-BE-NEXT:    vmov r3, r2, d11
-; CHECK-BE-NEXT:    vmov r1, r0, d10
-; CHECK-BE-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-BE-NEXT:    mov r2, r4
+; CHECK-BE-NEXT:    mov r3, r6
+; CHECK-BE-NEXT:    vpop {d8, d9}
 ; CHECK-BE-NEXT:    add sp, #4
 ; CHECK-BE-NEXT:    pop {r4, r5, r6, r7, pc}
 ;
@@ -495,24 +499,22 @@ define <2 x double> @vector_add_f64(<2 x double> %lhs, <2 x double> %rhs) {
 ; CHECK-FP-NEXT:    sub sp, #4
 ; CHECK-FP-NEXT:    .vsave {d8, d9}
 ; CHECK-FP-NEXT:    vpush {d8, d9}
-; CHECK-FP-NEXT:    mov r5, r0
-; CHECK-FP-NEXT:    add r0, sp, #40
-; CHECK-FP-NEXT:    vldrw.u32 q4, [r0]
-; CHECK-FP-NEXT:    mov r4, r2
-; CHECK-FP-NEXT:    mov r6, r3
-; CHECK-FP-NEXT:    mov r7, r1
-; CHECK-FP-NEXT:    vmov r2, r3, d9
-; CHECK-FP-NEXT:    mov r0, r4
-; CHECK-FP-NEXT:    mov r1, r6
-; CHECK-FP-NEXT:    bl __aeabi_dadd
+; CHECK-FP-NEXT:    mov r5, r2
+; CHECK-FP-NEXT:    add r2, sp, #40
+; CHECK-FP-NEXT:    vldrw.u32 q4, [r2]
+; CHECK-FP-NEXT:    mov r4, r3
 ; CHECK-FP-NEXT:    vmov r2, r3, d8
-; CHECK-FP-NEXT:    vmov d9, r0, r1
-; CHECK-FP-NEXT:    mov r0, r5
-; CHECK-FP-NEXT:    mov r1, r7
 ; CHECK-FP-NEXT:    bl __aeabi_dadd
-; CHECK-FP-NEXT:    vmov d8, r0, r1
 ; CHECK-FP-NEXT:    vmov r2, r3, d9
-; CHECK-FP-NEXT:    vmov r0, r1, d8
+; CHECK-FP-NEXT:    mov r6, r0
+; CHECK-FP-NEXT:    mov r7, r1
+; CHECK-FP-NEXT:    mov r0, r5
+; CHECK-FP-NEXT:    mov r1, r4
+; CHECK-FP-NEXT:    bl __aeabi_dadd
+; CHECK-FP-NEXT:    mov r2, r0
+; CHECK-FP-NEXT:    mov r3, r1
+; CHECK-FP-NEXT:    mov r0, r6
+; CHECK-FP-NEXT:    mov r1, r7
 ; CHECK-FP-NEXT:    vpop {d8, d9}
 ; CHECK-FP-NEXT:    add sp, #4
 ; CHECK-FP-NEXT:    pop {r4, r5, r6, r7, pc}

diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 423f796e97753..b998d62b0d9c6 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -39,20 +39,20 @@ define void @vld3_v4i32(<12 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT:    vmov.f64 d6, d2
-; CHECK-NEXT:    vmov.f32 s16, s5
-; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmov.f32 s17, s0
-; CHECK-NEXT:    vmov.f32 s14, s2
-; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-NEXT:    vmov.f64 d4, d2
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmov.f32 s9, s7
+; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vmov.f32 s10, s2
+; CHECK-NEXT:    vmov.f32 s14, s3
 ; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s15, s9
-; CHECK-NEXT:    vadd.i32 q3, q3, q4
-; CHECK-NEXT:    vmov.f32 s3, s11
-; CHECK-NEXT:    vadd.i32 q0, q3, q0
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.f32 s11, s17
+; CHECK-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-NEXT:    vmov.f32 s3, s19
+; CHECK-NEXT:    vadd.i32 q0, q2, q0
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
@@ -74,36 +74,36 @@ define void @vld3_v8i32(<24 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
-; CHECK-NEXT:    vmov.f64 d6, d2
-; CHECK-NEXT:    vmov.f32 s16, s5
-; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmov.f32 s17, s0
-; CHECK-NEXT:    vmov.f32 s14, s2
-; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT:    vmov.f64 d4, d2
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmov.f32 s9, s7
+; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vmov.f32 s10, s2
+; CHECK-NEXT:    vmov.f32 s14, s3
 ; CHECK-NEXT:    vmov.f32 s0, s6
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s2, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s15, s9
-; CHECK-NEXT:    vmov.f32 s3, s11
-; CHECK-NEXT:    vadd.i32 q3, q3, q4
-; CHECK-NEXT:    vadd.i32 q0, q3, q0
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.f32 s11, s17
+; CHECK-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-NEXT:    vmov.f32 s3, s19
+; CHECK-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s16, s13
-; CHECK-NEXT:    vmov.f64 d10, d6
+; CHECK-NEXT:    vmov.f32 s16, s9
+; CHECK-NEXT:    vmov.f64 d10, d4
 ; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s21, s15
+; CHECK-NEXT:    vmov.f32 s21, s11
 ; CHECK-NEXT:    vmov.f32 s18, s7
 ; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s4, s14
-; CHECK-NEXT:    vmov.f32 s6, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s23, s9
+; CHECK-NEXT:    vmov.f32 s4, s10
+; CHECK-NEXT:    vmov.f32 s6, s12
+; CHECK-NEXT:    vmov.f32 s19, s14
+; CHECK-NEXT:    vmov.f32 s23, s13
 ; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmov.f32 s7, s15
 ; CHECK-NEXT:    vadd.i32 q1, q4, q1
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
@@ -126,70 +126,70 @@ define void @vld3_v16i32(<48 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
-; CHECK-NEXT:    vmov.f64 d6, d2
-; CHECK-NEXT:    vmov.f32 s16, s5
-; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmov.f32 s17, s0
-; CHECK-NEXT:    vmov.f32 s14, s2
-; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-NEXT:    vmov.f64 d4, d2
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmov.f32 s9, s7
+; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vmov.f32 s10, s2
+; CHECK-NEXT:    vmov.f32 s14, s3
 ; CHECK-NEXT:    vmov.f32 s0, s6
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s2, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s15, s9
-; CHECK-NEXT:    vmov.f32 s3, s11
-; CHECK-NEXT:    vadd.i32 q3, q3, q4
-; CHECK-NEXT:    vadd.i32 q0, q3, q0
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s16, s13
-; CHECK-NEXT:    vmov.f64 d10, d6
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.f32 s11, s17
+; CHECK-NEXT:    vadd.i32 q2, q2, q3
+; CHECK-NEXT:    vmov.f32 s3, s19
+; CHECK-NEXT:    vadd.i32 q0, q2, q0
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s16, s9
+; CHECK-NEXT:    vmov.f64 d10, d4
 ; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s21, s15
+; CHECK-NEXT:    vmov.f32 s21, s11
 ; CHECK-NEXT:    vmov.f32 s18, s7
 ; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s4, s14
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
-; CHECK-NEXT:    vmov.f32 s6, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s23, s9
-; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmov.f32 s4, s10
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-NEXT:    vmov.f32 s6, s12
+; CHECK-NEXT:    vmov.f32 s19, s14
+; CHECK-NEXT:    vmov.f32 s23, s13
+; CHECK-NEXT:    vmov.f32 s7, s15
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
 ; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f64 d10, d6
+; CHECK-NEXT:    vmov.f32 s20, s13
 ; CHECK-NEXT:    vadd.i32 q1, q4, q1
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #176]
-; CHECK-NEXT:    vmov.f32 s24, s13
-; CHECK-NEXT:    vmov.f32 s21, s15
-; CHECK-NEXT:    vmov.f32 s25, s8
-; CHECK-NEXT:    vmov.f32 s22, s10
-; CHECK-NEXT:    vmov.f32 s26, s11
+; CHECK-NEXT:    vmov.f64 d8, d6
+; CHECK-NEXT:    vmov.f32 s17, s15
+; CHECK-NEXT:    vmov.f32 s21, s8
+; CHECK-NEXT:    vmov.f32 s18, s10
+; CHECK-NEXT:    vmov.f32 s22, s11
 ; CHECK-NEXT:    vmov.f32 s8, s14
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #112]
-; CHECK-NEXT:    vmov.f32 s10, s16
-; CHECK-NEXT:    vmov.f32 s27, s18
-; CHECK-NEXT:    vmov.f32 s23, s17
-; CHECK-NEXT:    vmov.f32 s11, s19
-; CHECK-NEXT:    vadd.i32 q5, q5, q6
-; CHECK-NEXT:    vadd.i32 q2, q5, q2
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #96]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #128]
+; CHECK-NEXT:    vmov.f32 s10, s24
+; CHECK-NEXT:    vmov.f32 s23, s26
+; CHECK-NEXT:    vmov.f32 s19, s25
+; CHECK-NEXT:    vadd.i32 q4, q4, q5
+; CHECK-NEXT:    vmov.f32 s11, s27
+; CHECK-NEXT:    vadd.i32 q2, q4, q2
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #128]
 ; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s24, s21
+; CHECK-NEXT:    vmov.f32 s24, s17
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f64 d14, d10
+; CHECK-NEXT:    vmov.f64 d14, d8
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    vmov.f32 s25, s12
-; CHECK-NEXT:    vmov.f32 s29, s23
+; CHECK-NEXT:    vmov.f32 s29, s19
 ; CHECK-NEXT:    vmov.f32 s26, s15
 ; CHECK-NEXT:    vmov.f32 s30, s14
-; CHECK-NEXT:    vmov.f32 s12, s22
-; CHECK-NEXT:    vmov.f32 s14, s16
-; CHECK-NEXT:    vmov.f32 s27, s18
-; CHECK-NEXT:    vmov.f32 s31, s17
+; CHECK-NEXT:    vmov.f32 s12, s18
+; CHECK-NEXT:    vmov.f32 s14, s20
+; CHECK-NEXT:    vmov.f32 s27, s22
+; CHECK-NEXT:    vmov.f32 s31, s21
 ; CHECK-NEXT:    vadd.i32 q6, q7, q6
-; CHECK-NEXT:    vmov.f32 s15, s19
+; CHECK-NEXT:    vmov.f32 s15, s23
 ; CHECK-NEXT:    vadd.i32 q3, q6, q3
 ; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -358,53 +358,53 @@ define void @vld3_v16i16(<48 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK-NEXT:    .pad #16
 ; CHECK-NEXT:    sub sp, #16
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
-; CHECK-NEXT:    vmovx.f16 s8, s6
-; CHECK-NEXT:    vmov.f32 s0, s5
+; CHECK-NEXT:    vmov.f64 d0, d2
+; CHECK-NEXT:    vmovx.f16 s8, s5
 ; CHECK-NEXT:    vins.f16 s0, s8
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vmovx.f16 s12, s9
-; CHECK-NEXT:    vmov.f32 s1, s8
+; CHECK-NEXT:    vmov.f32 s1, s7
+; CHECK-NEXT:    vmovx.f16 s12, s8
+; CHECK-NEXT:    vmovx.f16 s16, s9
 ; CHECK-NEXT:    vins.f16 s1, s12
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s2, s11
+; CHECK-NEXT:    vmovx.f16 s12, s11
+; CHECK-NEXT:    vmov.f32 s2, s10
 ; CHECK-NEXT:    vmov.u16 r2, q2[5]
-; CHECK-NEXT:    vmovx.f16 s20, s15
-; CHECK-NEXT:    vmov.f32 s19, s14
-; CHECK-NEXT:    vins.f16 s19, s20
-; CHECK-NEXT:    vmov.f32 s18, s12
-; CHECK-NEXT:    vmov q5, q4
-; CHECK-NEXT:    vmovnb.i32 q5, q0
-; CHECK-NEXT:    vmov.f32 s2, s22
-; CHECK-NEXT:    vmovx.f16 s20, s5
-; CHECK-NEXT:    vmov.f32 s3, s19
-; CHECK-NEXT:    vmov.f64 d8, d2
-; CHECK-NEXT:    vins.f16 s16, s20
-; CHECK-NEXT:    vmovx.f16 s20, s8
-; CHECK-NEXT:    vmov.f32 s17, s7
-; CHECK-NEXT:    vins.f16 s17, s20
-; CHECK-NEXT:    vmovx.f16 s20, s11
-; CHECK-NEXT:    vmov.f32 s18, s10
+; CHECK-NEXT:    vins.f16 s2, s12
+; CHECK-NEXT:    vmovx.f16 s12, s6
+; CHECK-NEXT:    vins.f16 s5, s12
+; CHECK-NEXT:    vmov.f32 s13, s8
+; CHECK-NEXT:    vins.f16 s13, s16
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmovx.f16 s20, s18
+; CHECK-NEXT:    vmov.f32 s3, s17
+; CHECK-NEXT:    vins.f16 s3, s20
+; CHECK-NEXT:    vmovx.f16 s20, s19
 ; CHECK-NEXT:    vins.f16 s18, s20
-; CHECK-NEXT:    vmovx.f16 s20, s14
-; CHECK-NEXT:    vmov.f32 s19, s13
-; CHECK-NEXT:    vins.f16 s19, s20
+; CHECK-NEXT:    vmov.f32 s14, s11
+; CHECK-NEXT:    vmov.f32 s23, s18
+; CHECK-NEXT:    vmov.f32 s22, s16
+; CHECK-NEXT:    vmov q6, q5
+; CHECK-NEXT:    vmovnb.i32 q6, q3
+; CHECK-NEXT:    vmov.f32 s14, s26
+; CHECK-NEXT:    vmov.f32 s15, s23
 ; CHECK-NEXT:    vmovx.f16 s20, s4
 ; CHECK-NEXT:    vins.f16 s20, s6
 ; CHECK-NEXT:    vmovx.f16 s21, s7
-; CHECK-NEXT:    vins.f16 s6, s12
-; CHECK-NEXT:    vmovx.f16 s7, s13
+; CHECK-NEXT:    vins.f16 s6, s16
+; CHECK-NEXT:    vmovx.f16 s7, s17
 ; CHECK-NEXT:    vins.f16 s21, s9
-; CHECK-NEXT:    vins.f16 s7, s15
+; CHECK-NEXT:    vins.f16 s7, s19
 ; CHECK-NEXT:    vmov.16 q5[4], r2
 ; CHECK-NEXT:    vmov q2, q1
 ; CHECK-NEXT:    vmovnb.i32 q2, q5
 ; CHECK-NEXT:    vmov.f32 s22, s10
 ; CHECK-NEXT:    vldrw.u32 q2, [r0]
 ; CHECK-NEXT:    vmov.f32 s23, s7
-; CHECK-NEXT:    vadd.i16 q1, q4, q5
-; CHECK-NEXT:    vmovx.f16 s12, s10
-; CHECK-NEXT:    vadd.i16 q0, q1, q0
+; CHECK-NEXT:    vadd.i16 q0, q0, q5
 ; CHECK-NEXT:    vmov.f32 s4, s9
+; CHECK-NEXT:    vadd.i16 q0, q0, q3
+; CHECK-NEXT:    vmovx.f16 s12, s10
 ; CHECK-NEXT:    vins.f16 s4, s12
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #16]
 ; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
@@ -900,20 +900,20 @@ define void @vld3_v4f32(<12 x float> *%src, <4 x float> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9}
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT:    vmov.f64 d6, d2
-; CHECK-NEXT:    vmov.f32 s16, s5
-; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmov.f32 s17, s0
-; CHECK-NEXT:    vmov.f32 s14, s2
-; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #32]
+; CHECK-NEXT:    vmov.f64 d4, d2
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmov.f32 s9, s7
+; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vmov.f32 s10, s2
+; CHECK-NEXT:    vmov.f32 s14, s3
 ; CHECK-NEXT:    vmov.f32 s0, s6
-; CHECK-NEXT:    vmov.f32 s2, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s15, s9
-; CHECK-NEXT:    vadd.f32 q3, q3, q4
-; CHECK-NEXT:    vmov.f32 s3, s11
-; CHECK-NEXT:    vadd.f32 q0, q3, q0
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.f32 s11, s17
+; CHECK-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-NEXT:    vmov.f32 s3, s19
+; CHECK-NEXT:    vadd.f32 q0, q2, q0
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
@@ -935,36 +935,36 @@ define void @vld3_v8f32(<24 x float> *%src, <8 x float> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
-; CHECK-NEXT:    vmov.f64 d6, d2
-; CHECK-NEXT:    vmov.f32 s16, s5
-; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmov.f32 s17, s0
-; CHECK-NEXT:    vmov.f32 s14, s2
-; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT:    vmov.f64 d4, d2
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmov.f32 s9, s7
+; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vmov.f32 s10, s2
+; CHECK-NEXT:    vmov.f32 s14, s3
 ; CHECK-NEXT:    vmov.f32 s0, s6
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s2, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s15, s9
-; CHECK-NEXT:    vmov.f32 s3, s11
-; CHECK-NEXT:    vadd.f32 q3, q3, q4
-; CHECK-NEXT:    vadd.f32 q0, q3, q0
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.f32 s11, s17
+; CHECK-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-NEXT:    vmov.f32 s3, s19
+; CHECK-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s16, s13
-; CHECK-NEXT:    vmov.f64 d10, d6
+; CHECK-NEXT:    vmov.f32 s16, s9
+; CHECK-NEXT:    vmov.f64 d10, d4
 ; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s21, s15
+; CHECK-NEXT:    vmov.f32 s21, s11
 ; CHECK-NEXT:    vmov.f32 s18, s7
 ; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s4, s14
-; CHECK-NEXT:    vmov.f32 s6, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s23, s9
+; CHECK-NEXT:    vmov.f32 s4, s10
+; CHECK-NEXT:    vmov.f32 s6, s12
+; CHECK-NEXT:    vmov.f32 s19, s14
+; CHECK-NEXT:    vmov.f32 s23, s13
 ; CHECK-NEXT:    vadd.f32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmov.f32 s7, s15
 ; CHECK-NEXT:    vadd.f32 q1, q4, q1
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
@@ -987,70 +987,70 @@ define void @vld3_v16f32(<48 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
-; CHECK-NEXT:    vmov.f64 d6, d2
-; CHECK-NEXT:    vmov.f32 s16, s5
-; CHECK-NEXT:    vmov.f32 s13, s7
-; CHECK-NEXT:    vmov.f32 s17, s0
-; CHECK-NEXT:    vmov.f32 s14, s2
-; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT:    vldrw.u32 q6, [r0, #176]
+; CHECK-NEXT:    vmov.f64 d4, d2
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmov.f32 s9, s7
+; CHECK-NEXT:    vmov.f32 s13, s0
+; CHECK-NEXT:    vmov.f32 s10, s2
+; CHECK-NEXT:    vmov.f32 s14, s3
 ; CHECK-NEXT:    vmov.f32 s0, s6
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s2, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s15, s9
-; CHECK-NEXT:    vmov.f32 s3, s11
-; CHECK-NEXT:    vadd.f32 q3, q3, q4
-; CHECK-NEXT:    vadd.f32 q0, q3, q0
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s16, s13
-; CHECK-NEXT:    vmov.f64 d10, d6
+; CHECK-NEXT:    vmov.f32 s2, s16
+; CHECK-NEXT:    vmov.f32 s15, s18
+; CHECK-NEXT:    vmov.f32 s11, s17
+; CHECK-NEXT:    vadd.f32 q2, q2, q3
+; CHECK-NEXT:    vmov.f32 s3, s19
+; CHECK-NEXT:    vadd.f32 q0, q2, q0
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s16, s9
+; CHECK-NEXT:    vmov.f64 d10, d4
 ; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmov.f32 s21, s15
+; CHECK-NEXT:    vmov.f32 s21, s11
 ; CHECK-NEXT:    vmov.f32 s18, s7
 ; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s4, s14
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
-; CHECK-NEXT:    vmov.f32 s6, s8
-; CHECK-NEXT:    vmov.f32 s19, s10
-; CHECK-NEXT:    vmov.f32 s23, s9
-; CHECK-NEXT:    vmov.f32 s7, s11
+; CHECK-NEXT:    vmov.f32 s4, s10
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #160]
+; CHECK-NEXT:    vmov.f32 s6, s12
+; CHECK-NEXT:    vmov.f32 s19, s14
+; CHECK-NEXT:    vmov.f32 s23, s13
+; CHECK-NEXT:    vmov.f32 s7, s15
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #144]
 ; CHECK-NEXT:    vadd.f32 q4, q5, q4
-; CHECK-NEXT:    vmov.f64 d10, d6
+; CHECK-NEXT:    vmov.f32 s20, s13
 ; CHECK-NEXT:    vadd.f32 q1, q4, q1
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #176]
-; CHECK-NEXT:    vmov.f32 s24, s13
-; CHECK-NEXT:    vmov.f32 s21, s15
-; CHECK-NEXT:    vmov.f32 s25, s8
-; CHECK-NEXT:    vmov.f32 s22, s10
-; CHECK-NEXT:    vmov.f32 s26, s11
+; CHECK-NEXT:    vmov.f64 d8, d6
+; CHECK-NEXT:    vmov.f32 s17, s15
+; CHECK-NEXT:    vmov.f32 s21, s8
+; CHECK-NEXT:    vmov.f32 s18, s10
+; CHECK-NEXT:    vmov.f32 s22, s11
 ; CHECK-NEXT:    vmov.f32 s8, s14
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #112]
-; CHECK-NEXT:    vmov.f32 s10, s16
-; CHECK-NEXT:    vmov.f32 s27, s18
-; CHECK-NEXT:    vmov.f32 s23, s17
-; CHECK-NEXT:    vmov.f32 s11, s19
-; CHECK-NEXT:    vadd.f32 q5, q5, q6
-; CHECK-NEXT:    vadd.f32 q2, q5, q2
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #96]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #128]
+; CHECK-NEXT:    vmov.f32 s10, s24
+; CHECK-NEXT:    vmov.f32 s23, s26
+; CHECK-NEXT:    vmov.f32 s19, s25
+; CHECK-NEXT:    vadd.f32 q4, q4, q5
+; CHECK-NEXT:    vmov.f32 s11, s27
+; CHECK-NEXT:    vadd.f32 q2, q4, q2
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #96]
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #128]
 ; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s24, s21
+; CHECK-NEXT:    vmov.f32 s24, s17
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vmov.f64 d14, d10
+; CHECK-NEXT:    vmov.f64 d14, d8
 ; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    vmov.f32 s25, s12
-; CHECK-NEXT:    vmov.f32 s29, s23
+; CHECK-NEXT:    vmov.f32 s29, s19
 ; CHECK-NEXT:    vmov.f32 s26, s15
 ; CHECK-NEXT:    vmov.f32 s30, s14
-; CHECK-NEXT:    vmov.f32 s12, s22
-; CHECK-NEXT:    vmov.f32 s14, s16
-; CHECK-NEXT:    vmov.f32 s27, s18
-; CHECK-NEXT:    vmov.f32 s31, s17
+; CHECK-NEXT:    vmov.f32 s12, s18
+; CHECK-NEXT:    vmov.f32 s14, s20
+; CHECK-NEXT:    vmov.f32 s27, s22
+; CHECK-NEXT:    vmov.f32 s31, s21
 ; CHECK-NEXT:    vadd.f32 q6, q7, q6
-; CHECK-NEXT:    vmov.f32 s15, s19
+; CHECK-NEXT:    vmov.f32 s15, s23
 ; CHECK-NEXT:    vadd.f32 q3, q6, q3
 ; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
@@ -1147,41 +1147,41 @@ define void @vld3_v8f16(<24 x half> *%src, <8 x half> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]
 ; CHECK-NEXT:    vldrw.u32 q4, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
 ; CHECK-NEXT:    vmovx.f16 s8, s2
 ; CHECK-NEXT:    vmov.f32 s4, s1
 ; CHECK-NEXT:    vins.f16 s4, s8
 ; CHECK-NEXT:    vmovx.f16 s8, s17
 ; CHECK-NEXT:    vmov.f32 s5, s16
-; CHECK-NEXT:    vmovx.f16 s24, s1
+; CHECK-NEXT:    vmovx.f16 s20, s15
 ; CHECK-NEXT:    vins.f16 s5, s8
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s11, s14
+; CHECK-NEXT:    vins.f16 s11, s20
 ; CHECK-NEXT:    vmov.f32 s6, s19
-; CHECK-NEXT:    vmovx.f16 s26, s16
-; CHECK-NEXT:    vmovx.f16 s20, s11
-; CHECK-NEXT:    vmov.f32 s15, s10
-; CHECK-NEXT:    vins.f16 s15, s20
-; CHECK-NEXT:    vmovx.f16 s20, s8
+; CHECK-NEXT:    vmovx.f16 s20, s12
+; CHECK-NEXT:    vmov.f32 s28, s18
 ; CHECK-NEXT:    vins.f16 s6, s20
 ; CHECK-NEXT:    vmovx.f16 s20, s19
-; CHECK-NEXT:    vmov.f32 s28, s18
-; CHECK-NEXT:    vmovx.f16 s30, s10
 ; CHECK-NEXT:    vins.f16 s28, s20
+; CHECK-NEXT:    vmovx.f16 s24, s1
 ; CHECK-NEXT:    vmovx.f16 s20, s0
 ; CHECK-NEXT:    vins.f16 s0, s24
 ; CHECK-NEXT:    vins.f16 s20, s2
+; CHECK-NEXT:    vmovx.f16 s26, s16
 ; CHECK-NEXT:    vmovx.f16 s21, s3
 ; CHECK-NEXT:    vins.f16 s3, s26
 ; CHECK-NEXT:    vins.f16 s21, s17
-; CHECK-NEXT:    vmov.f32 s14, s8
-; CHECK-NEXT:    vmovx.f16 s23, s9
+; CHECK-NEXT:    vmovx.f16 s30, s14
+; CHECK-NEXT:    vmovx.f16 s23, s13
+; CHECK-NEXT:    vmov.f32 s10, s12
 ; CHECK-NEXT:    vmov.f32 s1, s3
-; CHECK-NEXT:    vins.f16 s9, s30
-; CHECK-NEXT:    vins.f16 s23, s11
-; CHECK-NEXT:    vmovx.f16 s22, s18
+; CHECK-NEXT:    vins.f16 s13, s30
+; CHECK-NEXT:    vins.f16 s23, s15
 ; CHECK-NEXT:    vmov.f32 s2, s28
-; CHECK-NEXT:    vins.f16 s22, s8
-; CHECK-NEXT:    vmov.f32 s3, s9
-; CHECK-NEXT:    vmov.f32 s7, s15
+; CHECK-NEXT:    vmovx.f16 s22, s18
+; CHECK-NEXT:    vmov.f32 s3, s13
+; CHECK-NEXT:    vins.f16 s22, s12
+; CHECK-NEXT:    vmov.f32 s7, s11
 ; CHECK-NEXT:    vadd.f16 q0, q0, q5
 ; CHECK-NEXT:    vadd.f16 q0, q0, q1
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
@@ -1204,24 +1204,24 @@ define void @vld3_v16f16(<48 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #64]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #64]
 ; CHECK-NEXT:    vmovx.f16 s8, s2
 ; CHECK-NEXT:    vmov.f32 s4, s1
 ; CHECK-NEXT:    vins.f16 s4, s8
-; CHECK-NEXT:    vmovx.f16 s8, s17
-; CHECK-NEXT:    vmov.f32 s5, s16
+; CHECK-NEXT:    vmovx.f16 s8, s13
+; CHECK-NEXT:    vmov.f32 s5, s12
 ; CHECK-NEXT:    vmovx.f16 s24, s1
 ; CHECK-NEXT:    vins.f16 s5, s8
 ; CHECK-NEXT:    vldrw.u32 q2, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s6, s19
-; CHECK-NEXT:    vmovx.f16 s26, s16
+; CHECK-NEXT:    vmov.f32 s6, s15
+; CHECK-NEXT:    vmovx.f16 s26, s12
 ; CHECK-NEXT:    vmovx.f16 s20, s11
-; CHECK-NEXT:    vmov.f32 s15, s10
-; CHECK-NEXT:    vins.f16 s15, s20
+; CHECK-NEXT:    vmov.f32 s19, s10
+; CHECK-NEXT:    vins.f16 s19, s20
 ; CHECK-NEXT:    vmovx.f16 s20, s8
 ; CHECK-NEXT:    vins.f16 s6, s20
-; CHECK-NEXT:    vmovx.f16 s20, s19
-; CHECK-NEXT:    vmov.f32 s28, s18
+; CHECK-NEXT:    vmovx.f16 s20, s15
+; CHECK-NEXT:    vmov.f32 s28, s14
 ; CHECK-NEXT:    vmovx.f16 s30, s10
 ; CHECK-NEXT:    vins.f16 s28, s20
 ; CHECK-NEXT:    vmovx.f16 s20, s0
@@ -1229,17 +1229,17 @@ define void @vld3_v16f16(<48 x half> *%src, <16 x half> *%dst) {
 ; CHECK-NEXT:    vins.f16 s20, s2
 ; CHECK-NEXT:    vmovx.f16 s21, s3
 ; CHECK-NEXT:    vins.f16 s3, s26
-; CHECK-NEXT:    vins.f16 s21, s17
-; CHECK-NEXT:    vmov.f32 s14, s8
+; CHECK-NEXT:    vins.f16 s21, s13
+; CHECK-NEXT:    vmov.f32 s18, s8
 ; CHECK-NEXT:    vmovx.f16 s23, s9
 ; CHECK-NEXT:    vmov.f32 s1, s3
 ; CHECK-NEXT:    vins.f16 s9, s30
 ; CHECK-NEXT:    vins.f16 s23, s11
-; CHECK-NEXT:    vmovx.f16 s22, s18
+; CHECK-NEXT:    vmovx.f16 s22, s14
 ; CHECK-NEXT:    vmov.f32 s2, s28
 ; CHECK-NEXT:    vins.f16 s22, s8
 ; CHECK-NEXT:    vmov.f32 s3, s9
-; CHECK-NEXT:    vmov.f32 s7, s15
+; CHECK-NEXT:    vmov.f32 s7, s19
 ; CHECK-NEXT:    vadd.f16 q0, q0, q5
 ; CHECK-NEXT:    vadd.f16 q1, q0, q1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0]

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vld4.ll b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
index 92a25743a6b82..c1b984761dcdf 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
@@ -193,28 +193,27 @@ define void @vld4_v4i32_align1(<16 x i32> *%src, <4 x i32> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrb.u8 q0, [r0]
-; CHECK-NEXT:    vldrb.u8 q3, [r0, #32]
-; CHECK-NEXT:    vldrb.u8 q1, [r0, #48]
-; CHECK-NEXT:    vldrb.u8 q2, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s18, s15
-; CHECK-NEXT:    vmov.f64 d10, d1
-; CHECK-NEXT:    vmov.f32 s19, s7
-; CHECK-NEXT:    vmov.f32 s21, s10
-; CHECK-NEXT:    vmov.f32 s16, s3
-; CHECK-NEXT:    vmov.f32 s15, s6
-; CHECK-NEXT:    vmov.f32 s22, s14
-; CHECK-NEXT:    vmov.f32 s17, s11
-; CHECK-NEXT:    vmov.f32 s23, s6
+; CHECK-NEXT:    vldrb.u8 q2, [r0]
+; CHECK-NEXT:    vldrb.u8 q3, [r0, #16]
+; CHECK-NEXT:    vldrb.u8 q1, [r0, #32]
+; CHECK-NEXT:    vldrb.u8 q0, [r0, #48]
+; CHECK-NEXT:    vmov.f32 s16, s11
+; CHECK-NEXT:    vmov.f64 d10, d5
+; CHECK-NEXT:    vmov.f32 s17, s15
+; CHECK-NEXT:    vmov.f32 s21, s14
+; CHECK-NEXT:    vmov.f32 s18, s7
+; CHECK-NEXT:    vmov.f32 s22, s6
+; CHECK-NEXT:    vmov.f32 s19, s3
+; CHECK-NEXT:    vmov.f32 s23, s2
 ; CHECK-NEXT:    vadd.i32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s22, s13
-; CHECK-NEXT:    vmov.f32 s23, s5
-; CHECK-NEXT:    vmov.f32 s20, s1
-; CHECK-NEXT:    vmov.f32 s2, s12
-; CHECK-NEXT:    vmov.f32 s3, s4
-; CHECK-NEXT:    vmov.f32 s21, s9
-; CHECK-NEXT:    vmov.f32 s1, s8
-; CHECK-NEXT:    vadd.i32 q0, q0, q5
+; CHECK-NEXT:    vmov.f32 s20, s9
+; CHECK-NEXT:    vmov.f32 s21, s13
+; CHECK-NEXT:    vmov.f32 s9, s12
+; CHECK-NEXT:    vmov.f32 s22, s5
+; CHECK-NEXT:    vmov.f32 s10, s4
+; CHECK-NEXT:    vmov.f32 s23, s1
+; CHECK-NEXT:    vmov.f32 s11, s0
+; CHECK-NEXT:    vadd.i32 q0, q2, q5
 ; CHECK-NEXT:    vadd.i32 q0, q0, q4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}
@@ -992,28 +991,27 @@ define void @vld4_v4f32_align1(<16 x float> *%src, <4 x float> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11}
-; CHECK-NEXT:    vldrb.u8 q0, [r0]
-; CHECK-NEXT:    vldrb.u8 q3, [r0, #32]
-; CHECK-NEXT:    vldrb.u8 q1, [r0, #48]
-; CHECK-NEXT:    vldrb.u8 q2, [r0, #16]
-; CHECK-NEXT:    vmov.f32 s18, s15
-; CHECK-NEXT:    vmov.f64 d10, d1
-; CHECK-NEXT:    vmov.f32 s19, s7
-; CHECK-NEXT:    vmov.f32 s21, s10
-; CHECK-NEXT:    vmov.f32 s16, s3
-; CHECK-NEXT:    vmov.f32 s15, s6
-; CHECK-NEXT:    vmov.f32 s22, s14
-; CHECK-NEXT:    vmov.f32 s17, s11
-; CHECK-NEXT:    vmov.f32 s23, s6
+; CHECK-NEXT:    vldrb.u8 q2, [r0]
+; CHECK-NEXT:    vldrb.u8 q3, [r0, #16]
+; CHECK-NEXT:    vldrb.u8 q1, [r0, #32]
+; CHECK-NEXT:    vldrb.u8 q0, [r0, #48]
+; CHECK-NEXT:    vmov.f32 s16, s11
+; CHECK-NEXT:    vmov.f64 d10, d5
+; CHECK-NEXT:    vmov.f32 s17, s15
+; CHECK-NEXT:    vmov.f32 s21, s14
+; CHECK-NEXT:    vmov.f32 s18, s7
+; CHECK-NEXT:    vmov.f32 s22, s6
+; CHECK-NEXT:    vmov.f32 s19, s3
+; CHECK-NEXT:    vmov.f32 s23, s2
 ; CHECK-NEXT:    vadd.f32 q4, q5, q4
-; CHECK-NEXT:    vmov.f32 s22, s13
-; CHECK-NEXT:    vmov.f32 s23, s5
-; CHECK-NEXT:    vmov.f32 s20, s1
-; CHECK-NEXT:    vmov.f32 s2, s12
-; CHECK-NEXT:    vmov.f32 s3, s4
-; CHECK-NEXT:    vmov.f32 s21, s9
-; CHECK-NEXT:    vmov.f32 s1, s8
-; CHECK-NEXT:    vadd.f32 q0, q0, q5
+; CHECK-NEXT:    vmov.f32 s20, s9
+; CHECK-NEXT:    vmov.f32 s21, s13
+; CHECK-NEXT:    vmov.f32 s9, s12
+; CHECK-NEXT:    vmov.f32 s22, s5
+; CHECK-NEXT:    vmov.f32 s10, s4
+; CHECK-NEXT:    vmov.f32 s23, s1
+; CHECK-NEXT:    vmov.f32 s11, s0
+; CHECK-NEXT:    vadd.f32 q0, q2, q5
 ; CHECK-NEXT:    vadd.f32 q0, q0, q4
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11}

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll b/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll
index 418c56d7b1c1f..8004aad599b90 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmull-splat.ll
@@ -190,17 +190,15 @@ entry:
 define arm_aapcs_vfpcc <4 x i64> @sext32_0213_0ext(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: sext32_0213_0ext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s0
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.f32 s16, s1
 ; CHECK-NEXT:    vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s10, s5
-; CHECK-NEXT:    vmullb.s32 q0, q2, q3
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmullb.s32 q1, q2, q3
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vmullb.s32 q2, q0, q3
+; CHECK-NEXT:    vmullb.s32 q1, q4, q3
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
   %shuf1 = shufflevector <8 x i32> %src1, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -215,17 +213,15 @@ entry:
 define arm_aapcs_vfpcc <4 x i64> @sext32_0ext_0213(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: sext32_0ext_0213:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s0
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.f32 s16, s1
 ; CHECK-NEXT:    vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s10, s5
-; CHECK-NEXT:    vmullb.s32 q0, q3, q2
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmullb.s32 q1, q3, q2
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vmullb.s32 q2, q3, q0
+; CHECK-NEXT:    vmullb.s32 q1, q3, q4
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
   %shuf1 = shufflevector <8 x i32> %src1, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -242,18 +238,13 @@ define arm_aapcs_vfpcc <4 x i64> @sext32_0213_ext0(<8 x i32> %src1, i32 %src2) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
 ; CHECK-NEXT:    push {r4, r5, r7, lr}
-; CHECK-NEXT:    vmov.f32 s4, s0
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s0, s4
-; CHECK-NEXT:    vmov.f32 s2, s5
-; CHECK-NEXT:    vmov r3, s0
-; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov q1, q0
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.f32 s8, s5
 ; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmov r1, s2
-; CHECK-NEXT:    umull r2, r5, r3, r0
 ; CHECK-NEXT:    umull lr, r12, r1, r0
+; CHECK-NEXT:    umull r2, r5, r3, r0
 ; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
 ; CHECK-NEXT:    asrs r2, r0, #31
 ; CHECK-NEXT:    mla r4, r1, r2, r12
@@ -291,19 +282,14 @@ define arm_aapcs_vfpcc <4 x i64> @sext32_ext0_0213(<8 x i32> %src1, i32 %src2) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .save {r4, r5, r7, lr}
 ; CHECK-NEXT:    push {r4, r5, r7, lr}
-; CHECK-NEXT:    vmov.f32 s4, s0
+; CHECK-NEXT:    vmov q1, q0
 ; CHECK-NEXT:    asrs r4, r0, #31
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s0, s4
-; CHECK-NEXT:    vmov.f32 s2, s5
-; CHECK-NEXT:    vmov r3, s0
-; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.f32 s8, s5
 ; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmov r1, s2
-; CHECK-NEXT:    umull r2, r5, r0, r3
 ; CHECK-NEXT:    umull lr, r12, r0, r1
+; CHECK-NEXT:    umull r2, r5, r0, r3
 ; CHECK-NEXT:    vmov q0[2], q0[0], r2, lr
 ; CHECK-NEXT:    asrs r2, r1, #31
 ; CHECK-NEXT:    mla r2, r0, r2, r12
@@ -488,17 +474,15 @@ entry:
 define arm_aapcs_vfpcc <4 x i64> @zext32_0213_0ext(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: zext32_0213_0ext:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s0
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.f32 s16, s1
 ; CHECK-NEXT:    vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s10, s5
-; CHECK-NEXT:    vmullb.u32 q0, q2, q3
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmullb.u32 q1, q2, q3
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vmullb.u32 q2, q0, q3
+; CHECK-NEXT:    vmullb.u32 q1, q4, q3
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
   %shuf1 = shufflevector <8 x i32> %src1, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -513,17 +497,15 @@ entry:
 define arm_aapcs_vfpcc <4 x i64> @zext32_0ext_0213(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: zext32_0ext_0213:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s0
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.f32 s16, s1
 ; CHECK-NEXT:    vmov q3[2], q3[0], r0, r0
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s8, s4
-; CHECK-NEXT:    vmov.f32 s10, s5
-; CHECK-NEXT:    vmullb.u32 q0, q3, q2
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmullb.u32 q1, q3, q2
+; CHECK-NEXT:    vmov.f32 s18, s3
+; CHECK-NEXT:    vmullb.u32 q2, q3, q0
+; CHECK-NEXT:    vmullb.u32 q1, q3, q4
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vpop {d8, d9}
 ; CHECK-NEXT:    bx lr
 entry:
   %shuf1 = shufflevector <8 x i32> %src1, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -538,22 +520,17 @@ entry:
 define arm_aapcs_vfpcc <4 x i64> @zext32_0213_ext0(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: zext32_0213_ext0:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s0
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s0, s4
-; CHECK-NEXT:    vmov.f32 s2, s5
-; CHECK-NEXT:    vmov r3, s0
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
 ; CHECK-NEXT:    vmov r1, s2
-; CHECK-NEXT:    umull r3, r2, r3, r0
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.f32 s4, s1
+; CHECK-NEXT:    vmov.f32 s6, s3
 ; CHECK-NEXT:    umull r1, r12, r1, r0
-; CHECK-NEXT:    vmov q0[2], q0[0], r3, r1
-; CHECK-NEXT:    vmov r1, s10
-; CHECK-NEXT:    vmov r3, s8
-; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    umull r3, r2, r3, r0
+; CHECK-NEXT:    vmov q2[2], q2[0], r3, r1
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov q2[3], q2[1], r2, r12
+; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    umull r1, r2, r1, r0
 ; CHECK-NEXT:    umull r0, r3, r3, r0
 ; CHECK-NEXT:    vmov q1[2], q1[0], r0, r1
@@ -572,22 +549,17 @@ entry:
 define arm_aapcs_vfpcc <4 x i64> @zext32_ext0_0213(<8 x i32> %src1, i32 %src2) {
 ; CHECK-LABEL: zext32_ext0_0213:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.f32 s4, s0
-; CHECK-NEXT:    vmov.f32 s5, s2
-; CHECK-NEXT:    vmov.f32 s6, s1
-; CHECK-NEXT:    vmov.f32 s7, s3
-; CHECK-NEXT:    vmov.f32 s0, s4
-; CHECK-NEXT:    vmov.f32 s2, s5
-; CHECK-NEXT:    vmov r3, s0
-; CHECK-NEXT:    vmov.f32 s8, s6
-; CHECK-NEXT:    vmov.f32 s10, s7
 ; CHECK-NEXT:    vmov r1, s2
-; CHECK-NEXT:    umull r3, r2, r0, r3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.f32 s4, s1
+; CHECK-NEXT:    vmov.f32 s6, s3
 ; CHECK-NEXT:    umull r1, r12, r0, r1
-; CHECK-NEXT:    vmov q0[2], q0[0], r3, r1
-; CHECK-NEXT:    vmov r1, s10
-; CHECK-NEXT:    vmov r3, s8
-; CHECK-NEXT:    vmov q0[3], q0[1], r2, r12
+; CHECK-NEXT:    umull r3, r2, r0, r3
+; CHECK-NEXT:    vmov q2[2], q2[0], r3, r1
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov q2[3], q2[1], r2, r12
+; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    umull r1, r2, r0, r1
 ; CHECK-NEXT:    umull r0, r3, r0, r3
 ; CHECK-NEXT:    vmov q1[2], q1[0], r0, r1

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst3.ll b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
index c1827f0c91886..1e46dd1b256f5 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
@@ -345,61 +345,61 @@ define void @vst3_v8i16(<8 x i16> *%src, <24 x i16> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vmov.f64 d0, d6
+; CHECK-NEXT:    vmov.f64 d0, d4
 ; CHECK-NEXT:    vmov.u16 r2, q1[1]
-; CHECK-NEXT:    vmovx.f16 s20, s12
+; CHECK-NEXT:    vmovx.f16 s20, s8
 ; CHECK-NEXT:    vins.f16 s0, s4
-; CHECK-NEXT:    vmov.f32 s8, s13
-; CHECK-NEXT:    vins.f16 s8, s5
+; CHECK-NEXT:    vmov.f32 s12, s9
+; CHECK-NEXT:    vins.f16 s12, s5
 ; CHECK-NEXT:    vmov.16 q0[4], r2
-; CHECK-NEXT:    vmov.f32 s3, s8
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s1, s12
-; CHECK-NEXT:    vmov.f32 s17, s8
-; CHECK-NEXT:    vmov.f32 s18, s8
+; CHECK-NEXT:    vmov.f32 s3, s12
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT:    vmov.f32 s1, s8
+; CHECK-NEXT:    vmov.f32 s17, s12
+; CHECK-NEXT:    vmov.f32 s18, s12
 ; CHECK-NEXT:    vins.f16 s17, s20
 ; CHECK-NEXT:    vmovx.f16 s20, s18
 ; CHECK-NEXT:    vins.f16 s2, s20
-; CHECK-NEXT:    vmovx.f16 s20, s10
+; CHECK-NEXT:    vmovx.f16 s20, s14
 ; CHECK-NEXT:    vmov.f32 s18, s2
 ; CHECK-NEXT:    vmov.f32 s1, s17
 ; CHECK-NEXT:    vmov.f32 s2, s18
 ; CHECK-NEXT:    vmovx.f16 s16, s6
 ; CHECK-NEXT:    vins.f16 s16, s20
-; CHECK-NEXT:    vmovx.f16 s20, s11
+; CHECK-NEXT:    vmovx.f16 s20, s15
 ; CHECK-NEXT:    vins.f16 s17, s7
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
 ; CHECK-NEXT:    vmovx.f16 s19, s7
 ; CHECK-NEXT:    vrev32.16 q1, q1
 ; CHECK-NEXT:    vins.f16 s19, s20
-; CHECK-NEXT:    vmov.f32 s21, s15
-; CHECK-NEXT:    vmov.f32 s18, s11
+; CHECK-NEXT:    vmov.f32 s21, s11
+; CHECK-NEXT:    vmov.f32 s18, s15
 ; CHECK-NEXT:    vmovx.f16 s24, s17
-; CHECK-NEXT:    vmov.f32 s22, s15
+; CHECK-NEXT:    vmov.f32 s22, s11
 ; CHECK-NEXT:    vins.f16 s21, s24
 ; CHECK-NEXT:    vmovx.f16 s24, s22
 ; CHECK-NEXT:    vins.f16 s18, s24
-; CHECK-NEXT:    vmov.f32 s8, s9
+; CHECK-NEXT:    vmov.f32 s12, s13
 ; CHECK-NEXT:    vmov.f32 s22, s18
 ; CHECK-NEXT:    vmov.f32 s17, s21
 ; CHECK-NEXT:    vmov.f32 s18, s22
-; CHECK-NEXT:    vmovx.f16 s20, s13
-; CHECK-NEXT:    vins.f16 s8, s20
-; CHECK-NEXT:    vmovx.f16 s20, s14
-; CHECK-NEXT:    vins.f16 s10, s20
+; CHECK-NEXT:    vmovx.f16 s20, s9
+; CHECK-NEXT:    vins.f16 s12, s20
+; CHECK-NEXT:    vmovx.f16 s20, s10
+; CHECK-NEXT:    vins.f16 s14, s20
 ; CHECK-NEXT:    vstrw.32 q4, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s11, s10
-; CHECK-NEXT:    vmov.f32 s10, s14
-; CHECK-NEXT:    vmovx.f16 s12, s9
-; CHECK-NEXT:    vins.f16 s5, s12
-; CHECK-NEXT:    vmovx.f16 s12, s6
-; CHECK-NEXT:    vins.f16 s10, s12
-; CHECK-NEXT:    vmov.f32 s6, s10
-; CHECK-NEXT:    vmov.f32 s9, s5
-; CHECK-NEXT:    vmov.f32 s10, s6
-; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s15, s14
+; CHECK-NEXT:    vmov.f32 s14, s10
+; CHECK-NEXT:    vmovx.f16 s8, s13
+; CHECK-NEXT:    vins.f16 s5, s8
+; CHECK-NEXT:    vmovx.f16 s8, s6
+; CHECK-NEXT:    vins.f16 s14, s8
+; CHECK-NEXT:    vmov.f32 s6, s14
+; CHECK-NEXT:    vmov.f32 s13, s5
+; CHECK-NEXT:    vmov.f32 s14, s6
+; CHECK-NEXT:    vstrw.32 q3, [r1, #16]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -423,20 +423,19 @@ define void @vst3_v16i16(<16 x i16> *%src, <48 x i16> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    .pad #80
 ; CHECK-NEXT:    sub sp, #80
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #48]
 ; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
 ; CHECK-NEXT:    vldrw.u32 q6, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #64]
 ; CHECK-NEXT:    vmovx.f16 s0, s14
-; CHECK-NEXT:    vmovx.f16 s8, s6
+; CHECK-NEXT:    vmovx.f16 s8, s22
 ; CHECK-NEXT:    vins.f16 s8, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s15
-; CHECK-NEXT:    vins.f16 s9, s7
-; CHECK-NEXT:    vstrw.32 q1, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vmovx.f16 s11, s7
+; CHECK-NEXT:    vins.f16 s9, s23
 ; CHECK-NEXT:    vmov.u16 r2, q6[1]
+; CHECK-NEXT:    vmovx.f16 s11, s23
+; CHECK-NEXT:    vstrw.32 q6, [sp, #48] @ 16-byte Spill
 ; CHECK-NEXT:    vins.f16 s11, s0
-; CHECK-NEXT:    vstrw.32 q6, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q5, [sp] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.f32 s10, s15
 ; CHECK-NEXT:    vmovx.f16 s4, s9
 ; CHECK-NEXT:    vmov q4, q2
@@ -457,95 +456,99 @@ define void @vst3_v16i16(<16 x i16> *%src, <48 x i16> *%dst) {
 ; CHECK-NEXT:    vmov.16 q2[4], r2
 ; CHECK-NEXT:    vmov.f32 s11, s5
 ; CHECK-NEXT:    vins.f16 s11, s25
-; CHECK-NEXT:    vmov.f32 s18, s2
+; CHECK-NEXT:    vldrw.u32 q6, [r0, #64]
 ; CHECK-NEXT:    vmov.f32 s9, s4
-; CHECK-NEXT:    vstrw.32 q4, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s5, s20
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s6, s20
+; CHECK-NEXT:    vmov.u16 r0, q5[1]
+; CHECK-NEXT:    vmov.f32 s5, s24
+; CHECK-NEXT:    vmov.f32 s6, s24
 ; CHECK-NEXT:    vins.f16 s5, s28
 ; CHECK-NEXT:    vmovx.f16 s28, s6
 ; CHECK-NEXT:    vins.f16 s10, s28
-; CHECK-NEXT:    vmov.f64 d14, d8
+; CHECK-NEXT:    vmov.f32 s18, s2
 ; CHECK-NEXT:    vmov.f32 s6, s10
+; CHECK-NEXT:    vstrw.32 q4, [sp, #16] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.f32 s9, s5
-; CHECK-NEXT:    vmov.f32 s0, s17
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #48] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s10, s6
-; CHECK-NEXT:    vldrw.u32 q1, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #32] @ 16-byte Reload
 ; CHECK-NEXT:    vstrw.32 q2, [r1]
-; CHECK-NEXT:    vins.f16 s28, s4
-; CHECK-NEXT:    vmov.u16 r0, q1[1]
-; CHECK-NEXT:    vins.f16 s0, s5
+; CHECK-NEXT:    vmov.f64 d14, d2
+; CHECK-NEXT:    vins.f16 s28, s20
+; CHECK-NEXT:    vmov.f32 s0, s5
+; CHECK-NEXT:    vins.f16 s0, s21
 ; CHECK-NEXT:    vmov.16 q7[4], r0
 ; CHECK-NEXT:    vmov.f32 s31, s0
-; CHECK-NEXT:    vmovx.f16 s4, s16
+; CHECK-NEXT:    vldrw.u32 q5, [sp, #64] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s1, s12
+; CHECK-NEXT:    vmov.f32 s29, s4
+; CHECK-NEXT:    vmovx.f16 s4, s4
 ; CHECK-NEXT:    vmov.f32 s2, s12
 ; CHECK-NEXT:    vins.f16 s1, s4
-; CHECK-NEXT:    vmov.f32 s29, s16
 ; CHECK-NEXT:    vmovx.f16 s4, s2
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #64] @ 16-byte Reload
 ; CHECK-NEXT:    vins.f16 s30, s4
-; CHECK-NEXT:    vmovx.f16 s4, s22
+; CHECK-NEXT:    vmovx.f16 s4, s26
 ; CHECK-NEXT:    vmov.f32 s2, s30
 ; CHECK-NEXT:    vmov.f32 s29, s1
 ; CHECK-NEXT:    vmov.f32 s12, s13
 ; CHECK-NEXT:    vmov.f32 s30, s2
-; CHECK-NEXT:    vmovx.f16 s0, s26
+; CHECK-NEXT:    vmovx.f16 s0, s18
 ; CHECK-NEXT:    vins.f16 s0, s4
-; CHECK-NEXT:    vmovx.f16 s4, s23
-; CHECK-NEXT:    vins.f16 s1, s27
+; CHECK-NEXT:    vmov q1, q4
+; CHECK-NEXT:    vins.f16 s1, s7
 ; CHECK-NEXT:    vstrw.32 q7, [r1, #48]
-; CHECK-NEXT:    vmovx.f16 s3, s27
+; CHECK-NEXT:    vmovx.f16 s3, s7
+; CHECK-NEXT:    vmovx.f16 s4, s27
 ; CHECK-NEXT:    vins.f16 s3, s4
-; CHECK-NEXT:    vmov.f32 s5, s19
-; CHECK-NEXT:    vmov.f32 s2, s23
-; CHECK-NEXT:    vmovx.f16 s24, s1
-; CHECK-NEXT:    vmov.f32 s6, s19
+; CHECK-NEXT:    vmov.f32 s5, s23
+; CHECK-NEXT:    vmov.f32 s2, s27
+; CHECK-NEXT:    vmovx.f16 s16, s1
+; CHECK-NEXT:    vmov.f32 s6, s23
+; CHECK-NEXT:    vins.f16 s5, s16
 ; CHECK-NEXT:    vldrw.u32 q4, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vins.f16 s5, s24
-; CHECK-NEXT:    vmovx.f16 s24, s6
-; CHECK-NEXT:    vins.f16 s2, s24
-; CHECK-NEXT:    vmovx.f16 s24, s17
+; CHECK-NEXT:    vmovx.f16 s20, s6
+; CHECK-NEXT:    vmov.f32 s24, s25
+; CHECK-NEXT:    vins.f16 s2, s20
+; CHECK-NEXT:    vmovx.f16 s20, s17
+; CHECK-NEXT:    vins.f16 s12, s20
+; CHECK-NEXT:    vmovx.f16 s20, s18
+; CHECK-NEXT:    vins.f16 s14, s20
 ; CHECK-NEXT:    vmov.f32 s6, s2
-; CHECK-NEXT:    vins.f16 s12, s24
-; CHECK-NEXT:    vmovx.f16 s24, s18
-; CHECK-NEXT:    vmov.f32 s1, s5
-; CHECK-NEXT:    vins.f16 s14, s24
-; CHECK-NEXT:    vldrw.u32 q6, [sp, #48] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s15, s14
 ; CHECK-NEXT:    vmov.f32 s14, s18
 ; CHECK-NEXT:    vmovx.f16 s16, s13
-; CHECK-NEXT:    vrev32.16 q6, q6
-; CHECK-NEXT:    vmov.f32 s20, s21
-; CHECK-NEXT:    vins.f16 s25, s16
-; CHECK-NEXT:    vmovx.f16 s16, s26
+; CHECK-NEXT:    vstr s16, [sp, #32] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s1, s5
+; CHECK-NEXT:    vrev32.16 q5, q4
+; CHECK-NEXT:    vldr s16, [sp, #32] @ 4-byte Reload
+; CHECK-NEXT:    vins.f16 s21, s16
+; CHECK-NEXT:    vmovx.f16 s16, s22
 ; CHECK-NEXT:    vins.f16 s14, s16
 ; CHECK-NEXT:    vldrw.u32 q4, [sp, #64] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s2, s6
 ; CHECK-NEXT:    vmovx.f16 s4, s17
-; CHECK-NEXT:    vmov.f32 s26, s14
-; CHECK-NEXT:    vins.f16 s20, s4
+; CHECK-NEXT:    vmov.f32 s22, s14
+; CHECK-NEXT:    vins.f16 s24, s4
 ; CHECK-NEXT:    vmovx.f16 s4, s18
-; CHECK-NEXT:    vins.f16 s22, s4
-; CHECK-NEXT:    vldrw.u32 q1, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s23, s22
+; CHECK-NEXT:    vins.f16 s26, s4
+; CHECK-NEXT:    vmov.f32 s13, s21
+; CHECK-NEXT:    vmov.f32 s27, s26
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s22, s18
-; CHECK-NEXT:    vmovx.f16 s16, s21
-; CHECK-NEXT:    vrev32.16 q1, q1
-; CHECK-NEXT:    vmov.f32 s13, s25
-; CHECK-NEXT:    vins.f16 s5, s16
-; CHECK-NEXT:    vmovx.f16 s16, s6
-; CHECK-NEXT:    vins.f16 s22, s16
+; CHECK-NEXT:    vmov.f32 s26, s18
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vmovx.f16 s4, s25
 ; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s6, s22
-; CHECK-NEXT:    vmov.f32 s21, s5
-; CHECK-NEXT:    vstrw.32 q0, [r1, #80]
-; CHECK-NEXT:    vmov.f32 s14, s26
+; CHECK-NEXT:    vrev32.16 q4, q4
+; CHECK-NEXT:    vins.f16 s17, s4
+; CHECK-NEXT:    vmovx.f16 s4, s18
+; CHECK-NEXT:    vins.f16 s26, s4
+; CHECK-NEXT:    vmov.f32 s14, s22
+; CHECK-NEXT:    vmov.f32 s18, s26
 ; CHECK-NEXT:    vstrw.32 q3, [r1, #64]
-; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vstrw.32 q5, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s25, s17
+; CHECK-NEXT:    vstrw.32 q0, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s26, s18
+; CHECK-NEXT:    vstrw.32 q6, [r1, #16]
 ; CHECK-NEXT:    add sp, #80
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
@@ -1378,60 +1381,60 @@ define void @vst3_v8f16(<8 x half> *%src, <24 x half> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14}
-; CHECK-NEXT:    vldrw.u32 q3, [r0]
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
 ; CHECK-NEXT:    vldrw.u32 q5, [r0, #16]
-; CHECK-NEXT:    vmov.f64 d0, d6
+; CHECK-NEXT:    vmov.f64 d0, d4
 ; CHECK-NEXT:    vmovx.f16 s6, s20
-; CHECK-NEXT:    vmovx.f16 s8, s12
-; CHECK-NEXT:    vmov.f32 s4, s13
+; CHECK-NEXT:    vmovx.f16 s12, s8
+; CHECK-NEXT:    vmov.f32 s4, s9
 ; CHECK-NEXT:    vins.f16 s0, s20
 ; CHECK-NEXT:    vmov r2, s6
 ; CHECK-NEXT:    vins.f16 s4, s21
 ; CHECK-NEXT:    vmov.16 q0[4], r2
 ; CHECK-NEXT:    vmov.f32 s3, s4
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s1, s12
+; CHECK-NEXT:    vmov.f32 s1, s8
 ; CHECK-NEXT:    vmov.f32 s17, s4
 ; CHECK-NEXT:    vmovx.f16 s24, s7
 ; CHECK-NEXT:    vmov.f32 s18, s4
-; CHECK-NEXT:    vins.f16 s17, s8
-; CHECK-NEXT:    vmovx.f16 s8, s18
-; CHECK-NEXT:    vins.f16 s2, s8
-; CHECK-NEXT:    vmovx.f16 s11, s23
-; CHECK-NEXT:    vins.f16 s11, s24
+; CHECK-NEXT:    vins.f16 s17, s12
+; CHECK-NEXT:    vmovx.f16 s12, s18
+; CHECK-NEXT:    vins.f16 s2, s12
+; CHECK-NEXT:    vmovx.f16 s15, s23
+; CHECK-NEXT:    vins.f16 s15, s24
 ; CHECK-NEXT:    vmovx.f16 s24, s6
-; CHECK-NEXT:    vmovx.f16 s8, s22
+; CHECK-NEXT:    vmovx.f16 s12, s22
 ; CHECK-NEXT:    vmov.f32 s18, s2
-; CHECK-NEXT:    vins.f16 s8, s24
-; CHECK-NEXT:    vmov.f32 s25, s15
-; CHECK-NEXT:    vins.f16 s9, s23
-; CHECK-NEXT:    vmov.f32 s26, s15
-; CHECK-NEXT:    vmov.f32 s10, s7
-; CHECK-NEXT:    vmovx.f16 s28, s9
+; CHECK-NEXT:    vins.f16 s12, s24
+; CHECK-NEXT:    vmov.f32 s25, s11
+; CHECK-NEXT:    vins.f16 s13, s23
+; CHECK-NEXT:    vmov.f32 s26, s11
+; CHECK-NEXT:    vmov.f32 s14, s7
+; CHECK-NEXT:    vmovx.f16 s28, s13
 ; CHECK-NEXT:    vins.f16 s25, s28
 ; CHECK-NEXT:    vmovx.f16 s28, s26
-; CHECK-NEXT:    vins.f16 s10, s28
-; CHECK-NEXT:    vmovx.f16 s28, s13
+; CHECK-NEXT:    vins.f16 s14, s28
+; CHECK-NEXT:    vmovx.f16 s28, s9
 ; CHECK-NEXT:    vmov.f32 s4, s5
 ; CHECK-NEXT:    vrev32.16 q5, q5
 ; CHECK-NEXT:    vins.f16 s4, s28
-; CHECK-NEXT:    vmovx.f16 s28, s14
+; CHECK-NEXT:    vmovx.f16 s28, s10
 ; CHECK-NEXT:    vins.f16 s6, s28
-; CHECK-NEXT:    vmov.f32 s26, s10
+; CHECK-NEXT:    vmov.f32 s26, s14
 ; CHECK-NEXT:    vmov.f32 s7, s6
-; CHECK-NEXT:    vmov.f32 s6, s14
-; CHECK-NEXT:    vmovx.f16 s12, s5
-; CHECK-NEXT:    vins.f16 s21, s12
-; CHECK-NEXT:    vmovx.f16 s12, s22
-; CHECK-NEXT:    vins.f16 s6, s12
+; CHECK-NEXT:    vmov.f32 s6, s10
+; CHECK-NEXT:    vmovx.f16 s8, s5
+; CHECK-NEXT:    vins.f16 s21, s8
+; CHECK-NEXT:    vmovx.f16 s8, s22
+; CHECK-NEXT:    vins.f16 s6, s8
 ; CHECK-NEXT:    vmov.f32 s1, s17
 ; CHECK-NEXT:    vmov.f32 s22, s6
-; CHECK-NEXT:    vmov.f32 s9, s25
+; CHECK-NEXT:    vmov.f32 s13, s25
 ; CHECK-NEXT:    vmov.f32 s5, s21
 ; CHECK-NEXT:    vmov.f32 s2, s18
-; CHECK-NEXT:    vmov.f32 s10, s26
+; CHECK-NEXT:    vmov.f32 s14, s26
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
+; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
 ; CHECK-NEXT:    vmov.f32 s6, s22
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14}
@@ -1457,144 +1460,147 @@ define void @vst3_v16f16(<16 x half> *%src, <48 x half> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    .pad #128
 ; CHECK-NEXT:    sub sp, #128
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #80]
-; CHECK-NEXT:    vmovx.f16 s0, s31
-; CHECK-NEXT:    vmovx.f16 s11, s7
-; CHECK-NEXT:    vins.f16 s11, s0
-; CHECK-NEXT:    vmovx.f16 s0, s30
-; CHECK-NEXT:    vmovx.f16 s8, s6
-; CHECK-NEXT:    vmov q4, q1
-; CHECK-NEXT:    vins.f16 s8, s0
-; CHECK-NEXT:    vstrw.32 q4, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vins.f16 s9, s7
-; CHECK-NEXT:    vmov.f32 s10, s31
-; CHECK-NEXT:    vmovx.f16 s0, s9
-; CHECK-NEXT:    vmov q3, q2
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.f32 s5, s11
-; CHECK-NEXT:    vmov q6, q2
-; CHECK-NEXT:    vmov.f32 s6, s11
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q4, [r0, #64]
+; CHECK-NEXT:    vldrw.u32 q6, [r0]
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #16]
+; CHECK-NEXT:    vmovx.f16 s0, s19
+; CHECK-NEXT:    vmovx.f16 s7, s15
+; CHECK-NEXT:    vins.f16 s7, s0
+; CHECK-NEXT:    vmovx.f16 s0, s18
+; CHECK-NEXT:    vmovx.f16 s4, s14
+; CHECK-NEXT:    vstrw.32 q5, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vins.f16 s4, s0
+; CHECK-NEXT:    vmov.f64 d14, d12
+; CHECK-NEXT:    vins.f16 s5, s15
+; CHECK-NEXT:    vstrw.32 q3, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s6, s19
+; CHECK-NEXT:    vmovx.f16 s0, s5
+; CHECK-NEXT:    vmov q2, q1
+; CHECK-NEXT:    vmov.f32 s5, s27
+; CHECK-NEXT:    vmov.f32 s6, s27
+; CHECK-NEXT:    vins.f16 s28, s12
 ; CHECK-NEXT:    vins.f16 s5, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s6
-; CHECK-NEXT:    vstrw.32 q1, [sp, #64] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vins.f16 s14, s0
+; CHECK-NEXT:    vins.f16 s10, s0
+; CHECK-NEXT:    vstrw.32 q1, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f64 d2, d10
+; CHECK-NEXT:    vstrw.32 q2, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #48]
 ; CHECK-NEXT:    vmovx.f16 s2, s8
-; CHECK-NEXT:    vstrw.32 q3, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d6, d2
-; CHECK-NEXT:    vstrw.32 q1, [sp, #80] @ 16-byte Spill
-; CHECK-NEXT:    vstrw.32 q6, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vstrw.32 q2, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s0, s5
-; CHECK-NEXT:    vins.f16 s12, s8
+; CHECK-NEXT:    vstrw.32 q2, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s0, s21
+; CHECK-NEXT:    vins.f16 s4, s8
 ; CHECK-NEXT:    vmov r2, s2
 ; CHECK-NEXT:    vins.f16 s0, s9
-; CHECK-NEXT:    vmov.16 q3[4], r2
-; CHECK-NEXT:    vmovx.f16 s2, s16
-; CHECK-NEXT:    vmov.f32 s15, s0
-; CHECK-NEXT:    vmovx.f16 s0, s4
-; CHECK-NEXT:    vmov.f32 s13, s4
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmovx.f16 s2, s12
+; CHECK-NEXT:    vmov.f32 s7, s0
+; CHECK-NEXT:    vmovx.f16 s0, s20
 ; CHECK-NEXT:    vmov.f32 s5, s20
-; CHECK-NEXT:    vmov.f32 s6, s20
-; CHECK-NEXT:    vins.f16 s5, s0
-; CHECK-NEXT:    vmovx.f16 s0, s6
-; CHECK-NEXT:    vstrw.32 q1, [sp, #112] @ 16-byte Spill
-; CHECK-NEXT:    vmov q1, q6
-; CHECK-NEXT:    vins.f16 s14, s0
-; CHECK-NEXT:    vmov.f32 s0, s5
-; CHECK-NEXT:    vins.f16 s24, s16
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #80]
 ; CHECK-NEXT:    vmov r0, s2
-; CHECK-NEXT:    vins.f16 s0, s17
-; CHECK-NEXT:    vmov.16 q6[4], r0
-; CHECK-NEXT:    vmov.f32 s27, s0
-; CHECK-NEXT:    vmovx.f16 s0, s4
-; CHECK-NEXT:    vmov.f32 s25, s4
-; CHECK-NEXT:    vmov.f32 s5, s28
-; CHECK-NEXT:    vmov.f32 s6, s28
+; CHECK-NEXT:    vmov.f32 s9, s20
+; CHECK-NEXT:    vmov.16 q7[4], r0
+; CHECK-NEXT:    vmov.f32 s10, s20
+; CHECK-NEXT:    vins.f16 s9, s0
+; CHECK-NEXT:    vmovx.f16 s0, s10
+; CHECK-NEXT:    vins.f16 s6, s0
+; CHECK-NEXT:    vmov.f32 s0, s25
+; CHECK-NEXT:    vstrw.32 q2, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vmov q2, q4
+; CHECK-NEXT:    vins.f16 s0, s13
+; CHECK-NEXT:    vstrw.32 q1, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s5, s8
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s31, s0
+; CHECK-NEXT:    vmovx.f16 s0, s24
+; CHECK-NEXT:    vmov.f32 s6, s8
 ; CHECK-NEXT:    vins.f16 s5, s0
+; CHECK-NEXT:    vmov.f32 s29, s24
 ; CHECK-NEXT:    vmovx.f16 s0, s6
-; CHECK-NEXT:    vstrw.32 q1, [sp, #96] @ 16-byte Spill
-; CHECK-NEXT:    vins.f16 s26, s0
+; CHECK-NEXT:    vstrw.32 q1, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vins.f16 s30, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s22
-; CHECK-NEXT:    vmovx.f16 s4, s10
+; CHECK-NEXT:    vmovx.f16 s4, s14
+; CHECK-NEXT:    vmov.f32 s8, s9
 ; CHECK-NEXT:    vins.f16 s4, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s23
-; CHECK-NEXT:    vmovx.f16 s7, s11
-; CHECK-NEXT:    vmov.f32 s28, s29
+; CHECK-NEXT:    vmovx.f16 s7, s15
 ; CHECK-NEXT:    vins.f16 s7, s0
-; CHECK-NEXT:    vins.f16 s5, s11
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vins.f16 s5, s15
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #64] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s6, s23
 ; CHECK-NEXT:    vmovx.f16 s16, s5
-; CHECK-NEXT:    vmov.f32 s1, s11
-; CHECK-NEXT:    vmov.f32 s2, s11
+; CHECK-NEXT:    vmov.f32 s1, s15
+; CHECK-NEXT:    vmov.f32 s2, s15
 ; CHECK-NEXT:    vins.f16 s1, s16
 ; CHECK-NEXT:    vmovx.f16 s16, s2
 ; CHECK-NEXT:    vins.f16 s6, s16
-; CHECK-NEXT:    vmovx.f16 s16, s9
+; CHECK-NEXT:    vmovx.f16 s16, s13
 ; CHECK-NEXT:    vmov.f32 s20, s21
 ; CHECK-NEXT:    vins.f16 s20, s16
-; CHECK-NEXT:    vmovx.f16 s16, s10
+; CHECK-NEXT:    vmovx.f16 s16, s14
 ; CHECK-NEXT:    vins.f16 s22, s16
-; CHECK-NEXT:    vldrw.u32 q2, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #96] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s23, s22
-; CHECK-NEXT:    vrev32.16 q2, q2
+; CHECK-NEXT:    vmov.f32 s14, s18
+; CHECK-NEXT:    vstrw.32 q3, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s14, s30
+; CHECK-NEXT:    vstrw.32 q3, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #64] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s2, s6
-; CHECK-NEXT:    vmov.f32 s22, s18
-; CHECK-NEXT:    vmovx.f16 s16, s21
-; CHECK-NEXT:    vins.f16 s9, s16
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #112] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s18, s14
-; CHECK-NEXT:    vstrw.32 q2, [sp, #80] @ 16-byte Spill
-; CHECK-NEXT:    vstrw.32 q4, [sp, #112] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #96] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s18, s26
-; CHECK-NEXT:    vstrw.32 q4, [sp, #96] @ 16-byte Spill
-; CHECK-NEXT:    vmovx.f16 s16, s10
-; CHECK-NEXT:    vins.f16 s22, s16
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s22, s14
+; CHECK-NEXT:    vmovx.f16 s12, s21
+; CHECK-NEXT:    vstr s12, [sp, #64] @ 4-byte Spill
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #48] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s5, s1
-; CHECK-NEXT:    vmovx.f16 s8, s17
+; CHECK-NEXT:    vrev32.16 q4, q3
+; CHECK-NEXT:    vldr s12, [sp, #64] @ 4-byte Reload
+; CHECK-NEXT:    vins.f16 s17, s12
+; CHECK-NEXT:    vmovx.f16 s12, s18
+; CHECK-NEXT:    vins.f16 s22, s12
+; CHECK-NEXT:    vmovx.f16 s12, s25
 ; CHECK-NEXT:    vmov.f32 s6, s2
-; CHECK-NEXT:    vins.f16 s28, s8
-; CHECK-NEXT:    vmovx.f16 s0, s18
-; CHECK-NEXT:    vins.f16 s30, s0
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s31, s30
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s30, s18
-; CHECK-NEXT:    vmovx.f16 s16, s29
-; CHECK-NEXT:    vrev32.16 q0, q0
+; CHECK-NEXT:    vins.f16 s8, s12
+; CHECK-NEXT:    vmovx.f16 s0, s26
+; CHECK-NEXT:    vmov.f32 s18, s22
+; CHECK-NEXT:    vins.f16 s10, s0
+; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s11, s10
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #80]
-; CHECK-NEXT:    vins.f16 s1, s16
-; CHECK-NEXT:    vmovx.f16 s16, s2
-; CHECK-NEXT:    vins.f16 s30, s16
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #96] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s2, s30
-; CHECK-NEXT:    vmov.f32 s25, s17
-; CHECK-NEXT:    vmov.f32 s26, s18
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s10, s26
+; CHECK-NEXT:    vrev32.16 q6, q0
+; CHECK-NEXT:    vmovx.f16 s12, s9
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vins.f16 s25, s12
+; CHECK-NEXT:    vmovx.f16 s12, s26
+; CHECK-NEXT:    vins.f16 s10, s12
 ; CHECK-NEXT:    vmov.f32 s29, s1
-; CHECK-NEXT:    vstrw.32 q6, [r1]
-; CHECK-NEXT:    vmov.f32 s13, s17
-; CHECK-NEXT:    vmov.f32 s14, s18
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #64] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vstrw.32 q3, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s9, s17
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #96] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s30, s2
-; CHECK-NEXT:    vstrw.32 q7, [r1, #16]
-; CHECK-NEXT:    vmov.f32 s10, s18
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #80] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s18, s22
-; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s26, s10
+; CHECK-NEXT:    vmov.f32 s1, s13
+; CHECK-NEXT:    vstrw.32 q7, [r1]
+; CHECK-NEXT:    vmov.f32 s2, s14
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q0, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s2, s14
+; CHECK-NEXT:    vmov.f32 s13, s1
 ; CHECK-NEXT:    vmov.f32 s21, s17
+; CHECK-NEXT:    vmov.f32 s9, s25
 ; CHECK-NEXT:    vmov.f32 s22, s18
+; CHECK-NEXT:    vmov.f32 s10, s26
 ; CHECK-NEXT:    vstrw.32 q5, [r1, #64]
+; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s14, s2
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
+; CHECK-NEXT:    vstrw.32 q0, [r1, #48]
 ; CHECK-NEXT:    add sp, #128
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst4.ll b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
index cb933dc41f15a..4d1e12f0c5efd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
@@ -11,23 +11,18 @@ define void @vst4_v2i32(<2 x i32> *%src, <8 x i32> *%dst) {
 ; CHECK-NEXT:    ldrd lr, r12, [r0]
 ; CHECK-NEXT:    ldrd r3, r2, [r0, #8]
 ; CHECK-NEXT:    ldrd r4, r0, [r0, #16]
-; CHECK-NEXT:    vmov q1[2], q1[0], r4, r0
-; CHECK-NEXT:    vmov.f64 d0, d2
-; CHECK-NEXT:    vmov.f32 s1, s6
-; CHECK-NEXT:    vmov.f32 s2, s4
-; CHECK-NEXT:    vmov.f32 s3, s6
 ; CHECK-NEXT:    vmov q1[2], q1[0], lr, r3
 ; CHECK-NEXT:    vmov q1[3], q1[1], r12, r2
+; CHECK-NEXT:    vmov q0[2], q0[0], r4, r0
 ; CHECK-NEXT:    vmov.f64 d4, d2
 ; CHECK-NEXT:    vmov.f32 s9, s6
 ; CHECK-NEXT:    vmov.f32 s10, s0
-; CHECK-NEXT:    vmov.f32 s11, s2
+; CHECK-NEXT:    vmov.f32 s11, s0
+; CHECK-NEXT:    vmov.f32 s0, s5
 ; CHECK-NEXT:    vstrw.32 q2, [r1]
-; CHECK-NEXT:    vmov.f32 s8, s5
-; CHECK-NEXT:    vmov.f32 s9, s7
-; CHECK-NEXT:    vmov.f32 s10, s1
-; CHECK-NEXT:    vmov.f32 s11, s3
-; CHECK-NEXT:    vstrw.32 q2, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s1, s7
+; CHECK-NEXT:    vmov.f32 s3, s2
+; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
 ; CHECK-NEXT:    pop {r4, pc}
 entry:
   %s1 = getelementptr <2 x i32>, <2 x i32>* %src, i32 0
@@ -208,30 +203,30 @@ define void @vst4_v4i32_align1(<4 x i32> *%src, <16 x i32> *%dst) {
 ; CHECK-NEXT:    .vsave {d8, d9, d10}
 ; CHECK-NEXT:    vpush {d8, d9, d10}
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q4, [r0]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
 ; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    vmov r12, lr, d0
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT:    vmov.f64 d2, d8
-; CHECK-NEXT:    vmov.f32 s5, s0
-; CHECK-NEXT:    vmov s10, r2
-; CHECK-NEXT:    vmov s14, r3
-; CHECK-NEXT:    vmov.f32 s8, s18
-; CHECK-NEXT:    vmov s20, lr
-; CHECK-NEXT:    vmov.f32 s9, s2
-; CHECK-NEXT:    vmov s6, r12
-; CHECK-NEXT:    vmov.f32 s0, s17
-; CHECK-NEXT:    vmov.f32 s12, s19
-; CHECK-NEXT:    vmov.f32 s13, s3
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    vmov s14, r2
+; CHECK-NEXT:    vmov s18, r3
+; CHECK-NEXT:    vmov s10, lr
+; CHECK-NEXT:    vmov s20, r12
+; CHECK-NEXT:    vmov.f32 s16, s3
+; CHECK-NEXT:    vmov.f32 s12, s2
+; CHECK-NEXT:    vmov.f32 s8, s1
+; CHECK-NEXT:    vmov.f32 s1, s4
+; CHECK-NEXT:    vmov.f32 s17, s7
+; CHECK-NEXT:    vmov.f32 s13, s6
+; CHECK-NEXT:    vmov.f32 s9, s5
 ; CHECK-NEXT:    vmov.f32 s2, s20
+; CHECK-NEXT:    vmov.f32 s19, s18
 ; CHECK-NEXT:    vmov.f32 s15, s14
+; CHECK-NEXT:    vstrb.8 q4, [r1, #48]
 ; CHECK-NEXT:    vmov.f32 s11, s10
-; CHECK-NEXT:    vstrb.8 q3, [r1, #48]
+; CHECK-NEXT:    vstrb.8 q3, [r1, #32]
 ; CHECK-NEXT:    vmov.f32 s3, s20
-; CHECK-NEXT:    vstrb.8 q2, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s7, s6
-; CHECK-NEXT:    vstrb.8 q0, [r1, #16]
-; CHECK-NEXT:    vstrb.8 q1, [r1]
+; CHECK-NEXT:    vstrb.8 q2, [r1, #16]
+; CHECK-NEXT:    vstrb.8 q0, [r1]
 ; CHECK-NEXT:    vpop {d8, d9, d10}
 ; CHECK-NEXT:    pop {r7, pc}
 entry:
@@ -975,30 +970,30 @@ define void @vst4_v4f32_align1(<4 x float> *%src, <16 x float> *%dst) {
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12}
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #32]
-; CHECK-NEXT:    vldrw.u32 q5, [r0]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    vmov r12, lr, d0
-; CHECK-NEXT:    vmov.f64 d4, d10
-; CHECK-NEXT:    vmov.f32 s9, s4
-; CHECK-NEXT:    vmov s14, r2
-; CHECK-NEXT:    vmov s18, r3
-; CHECK-NEXT:    vmov.f32 s12, s22
-; CHECK-NEXT:    vmov s24, lr
-; CHECK-NEXT:    vmov.f32 s13, s6
-; CHECK-NEXT:    vmov.f32 s4, s21
-; CHECK-NEXT:    vmov.f32 s16, s23
-; CHECK-NEXT:    vmov.f32 s17, s7
-; CHECK-NEXT:    vmov s10, r12
+; CHECK-NEXT:    vmov s18, r2
+; CHECK-NEXT:    vmov s22, r3
+; CHECK-NEXT:    vmov s14, lr
+; CHECK-NEXT:    vmov s24, r12
+; CHECK-NEXT:    vmov.f32 s20, s7
+; CHECK-NEXT:    vmov.f32 s16, s6
+; CHECK-NEXT:    vmov.f32 s12, s5
+; CHECK-NEXT:    vmov.f32 s5, s8
+; CHECK-NEXT:    vmov.f32 s21, s11
+; CHECK-NEXT:    vmov.f32 s13, s9
+; CHECK-NEXT:    vmov.f32 s17, s10
 ; CHECK-NEXT:    vmov.f32 s6, s24
-; CHECK-NEXT:    vmov.f32 s19, s18
-; CHECK-NEXT:    vmov.f32 s15, s2
-; CHECK-NEXT:    vstrb.8 q4, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s7, s24
-; CHECK-NEXT:    vstrb.8 q3, [r1, #32]
-; CHECK-NEXT:    vmov.f32 s11, s0
-; CHECK-NEXT:    vstrb.8 q1, [r1, #16]
-; CHECK-NEXT:    vstrb.8 q2, [r1]
+; CHECK-NEXT:    vmov.f32 s23, s22
+; CHECK-NEXT:    vmov.f32 s19, s2
+; CHECK-NEXT:    vstrb.8 q5, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s15, s14
+; CHECK-NEXT:    vstrb.8 q4, [r1, #32]
+; CHECK-NEXT:    vmov.f32 s7, s0
+; CHECK-NEXT:    vstrb.8 q3, [r1, #16]
+; CHECK-NEXT:    vstrb.8 q1, [r1]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12}
 ; CHECK-NEXT:    pop {r7, pc}
 entry:


        


More information about the llvm-commits mailing list