[llvm] 1b435eb - [ARM] i16 insert-of-extract to VINS pattern

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 8 00:42:03 PST 2021


Author: David Green
Date: 2021-02-08T08:41:07Z
New Revision: 1b435eb8f3b71d11976c04bb6b99d6502aed2aab

URL: https://github.com/llvm/llvm-project/commit/1b435eb8f3b71d11976c04bb6b99d6502aed2aab
DIFF: https://github.com/llvm/llvm-project/commit/1b435eb8f3b71d11976c04bb6b99d6502aed2aab.diff

LOG: [ARM] i16 insert-of-extract to VINS pattern

This adds another tablegen fold that converts an i16 odd-lane-insert of
an even-lane-extract into a VINS. We extract the existing f32 value from
the destination register and VINS the new value into it. The rest of the
backend then is able to optimize the INSERT_SUBREG / COPY_TO_REGCLASS /
EXTRACT_SUBREG.

Differential Revision: https://reviews.llvm.org/D95456

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMInstrMVE.td
    llvm/test/CodeGen/Thumb2/mve-shuffle.ll
    llvm/test/CodeGen/Thumb2/mve-vld3.ll
    llvm/test/CodeGen/Thumb2/mve-vst3.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index 75eb458aeb3d..8b0dadd4af24 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -1878,6 +1878,14 @@ let Predicates = [HasMVEInt] in {
             (MVE_VMOV_from_lane_u16 MQPR:$src, imm:$lane)>;
   def : Pat<(ARMvgetlaneu (v8f16 MQPR:$src), imm:$lane),
             (MVE_VMOV_from_lane_u16 MQPR:$src, imm:$lane)>;
+  // For an i16 insert into an odd lane of a value extracted from an even lane, we may use VINS.
+  def : Pat<(ARMinsertelt (v8i16 MQPR:$src1),
+                          (ARMvgetlaneu (v8i16 MQPR:$src2), imm_even:$extlane),
+                          imm_odd:$inslane),
+            (COPY_TO_REGCLASS (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS MQPR:$src1, MQPR)),
+                                (VINSH (EXTRACT_SUBREG MQPR:$src1, (SSubReg_f16_reg imm_odd:$inslane)),
+                                       (EXTRACT_SUBREG MQPR:$src2, (SSubReg_f16_reg imm_even:$extlane))),
+                                (SSubReg_f16_reg imm_odd:$inslane)), MQPR)>;
 
   def : Pat<(v16i8 (scalar_to_vector GPR:$src)),
             (MVE_VMOV_to_lane_8  (v16i8 (IMPLICIT_DEF)), rGPR:$src, (i32 0))>;

diff  --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index 863ae1e9557e..8e98ab2425a8 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -376,49 +376,48 @@ define arm_aapcs_vfpcc <8 x i16> @shuffle3step_i16(<32 x i16> %src) {
 ; CHECK-NEXT:    vmov.16 q4[7], r0
 ; CHECK-NEXT:    vmov.u16 r0, q1[7]
 ; CHECK-NEXT:    vmov.16 q3[5], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[4]
+; CHECK-NEXT:    vmov.16 q5[6], r0
 ; CHECK-NEXT:    vmov.u16 r0, q0[2]
-; CHECK-NEXT:    vmov.f32 s15, s19
-; CHECK-NEXT:    vmov.16 q4[0], r0
+; CHECK-NEXT:    vmov.16 q6[0], r0
 ; CHECK-NEXT:    vmov.u16 r0, q0[5]
-; CHECK-NEXT:    vmov.16 q4[1], r0
+; CHECK-NEXT:    vmov.16 q6[1], r0
 ; CHECK-NEXT:    vmov.u16 r0, q1[0]
-; CHECK-NEXT:    vmov.16 q4[2], r0
+; CHECK-NEXT:    vmov.16 q6[2], r0
 ; CHECK-NEXT:    vmov.u16 r0, q1[3]
-; CHECK-NEXT:    vmov.16 q4[3], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[4]
-; CHECK-NEXT:    vmov.16 q6[6], r0
+; CHECK-NEXT:    vmov.16 q6[3], r0
 ; CHECK-NEXT:    vmov.u16 r0, q2[7]
-; CHECK-NEXT:    vmov.16 q6[7], r0
-; CHECK-NEXT:    vmov.f32 s18, s7
-; CHECK-NEXT:    vmov.f32 s26, s8
-; CHECK-NEXT:    vmov q5, q6
-; CHECK-NEXT:    vmov r1, s16
-; CHECK-NEXT:    vmovnb.i32 q5, q4
-; CHECK-NEXT:    vmov r0, s22
-; CHECK-NEXT:    vmov q5[2], q5[0], r1, r0
-; CHECK-NEXT:    vmov r0, s27
-; CHECK-NEXT:    vmov r1, s17
-; CHECK-NEXT:    vmov q5[3], q5[1], r1, r0
-; CHECK-NEXT:    vmov.u16 r0, q2[0]
-; CHECK-NEXT:    vmov.16 q4[5], r0
-; CHECK-NEXT:    vmov.u16 r0, q1[5]
-; CHECK-NEXT:    vmovx.f16 s19, s9
-; CHECK-NEXT:    vins.f16 s19, s11
+; CHECK-NEXT:    vmov.16 q5[7], r0
+; CHECK-NEXT:    vmov.f32 s26, s7
+; CHECK-NEXT:    vmov.f32 s22, s8
+; CHECK-NEXT:    vmov.f32 s15, s19
+; CHECK-NEXT:    vmov q4, q5
+; CHECK-NEXT:    vmovnb.i32 q4, q6
+; CHECK-NEXT:    vmov r1, s24
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vmov q4[2], q4[0], r1, r0
+; CHECK-NEXT:    vmov r0, s23
+; CHECK-NEXT:    vins.f16 s22, s8
+; CHECK-NEXT:    vmov r1, s25
+; CHECK-NEXT:    vmovx.f16 s23, s9
+; CHECK-NEXT:    vmov q4[3], q4[1], r1, r0
+; CHECK-NEXT:    vins.f16 s23, s11
 ; CHECK-NEXT:    vmovx.f16 s8, s0
 ; CHECK-NEXT:    vins.f16 s8, s2
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
 ; CHECK-NEXT:    vmovx.f16 s9, s3
-; CHECK-NEXT:    vmov q0, q4
+; CHECK-NEXT:    vmov q0, q5
 ; CHECK-NEXT:    vins.f16 s9, s5
 ; CHECK-NEXT:    vmov.16 q2[4], r0
 ; CHECK-NEXT:    vmovnb.i32 q0, q2
 ; CHECK-NEXT:    vmov r1, s8
 ; CHECK-NEXT:    vmov r0, s2
 ; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
-; CHECK-NEXT:    vmov r0, s19
+; CHECK-NEXT:    vmov r0, s23
 ; CHECK-NEXT:    vmov r1, s9
 ; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
 ; CHECK-NEXT:    vadd.i16 q0, q3, q0
-; CHECK-NEXT:    vadd.i16 q0, q0, q5
+; CHECK-NEXT:    vadd.i16 q0, q0, q4
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
 ; CHECK-NEXT:    bx lr
 entry:

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 3795f96766b8..8ea422424ead 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -377,12 +377,11 @@ define void @vld3_v8i16(<24 x i16> *%src, <8 x i16> *%dst) {
 ; CHECK-NEXT:    vins.f16 s20, s10
 ; CHECK-NEXT:    vmovx.f16 s21, s11
 ; CHECK-NEXT:    vins.f16 s21, s5
-; CHECK-NEXT:    vmov.16 q5[4], r0
-; CHECK-NEXT:    vmov.u16 r0, q0[0]
-; CHECK-NEXT:    vmov.16 q1[5], r0
-; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vins.f16 s6, s0
 ; CHECK-NEXT:    vmovx.f16 s7, s1
+; CHECK-NEXT:    vmov.16 q5[4], r0
 ; CHECK-NEXT:    vins.f16 s7, s3
+; CHECK-NEXT:    vmov r2, s20
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    vmovnb.i32 q0, q5
 ; CHECK-NEXT:    vmov r0, s2
@@ -411,132 +410,130 @@ define void @vld3_v16i16(<48 x i16> *%src, <16 x i16> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q2, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q4, [r0, #80]
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT:    vmov.u16 r2, q3[0]
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #80]
+; CHECK-NEXT:    vmov.u16 r2, q2[0]
 ; CHECK-NEXT:    vmov.16 q1[0], r2
-; CHECK-NEXT:    vmov.u16 r2, q3[3]
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
 ; CHECK-NEXT:    vmov.16 q1[1], r2
-; CHECK-NEXT:    vmov.u16 r2, q3[6]
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
 ; CHECK-NEXT:    vmov.16 q1[2], r2
-; CHECK-NEXT:    vmov.u16 r2, q2[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
 ; CHECK-NEXT:    vmov.16 q1[3], r2
-; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
 ; CHECK-NEXT:    vmov.16 q1[4], r2
-; CHECK-NEXT:    vmov.u16 r2, q4[2]
-; CHECK-NEXT:    vmov.16 q5[6], r2
-; CHECK-NEXT:    vmov.u16 r2, q4[5]
-; CHECK-NEXT:    vmov.16 q5[7], r2
-; CHECK-NEXT:    vmov.u16 r2, q2[7]
-; CHECK-NEXT:    vmov.16 q1[5], r2
-; CHECK-NEXT:    vmov.u16 r2, q4[4]
-; CHECK-NEXT:    vmov.16 q6[6], r2
 ; CHECK-NEXT:    vmov.u16 r2, q3[2]
-; CHECK-NEXT:    vmov.f32 s7, s23
-; CHECK-NEXT:    vmov.16 q5[0], r2
+; CHECK-NEXT:    vmov.16 q4[6], r2
 ; CHECK-NEXT:    vmov.u16 r2, q3[5]
-; CHECK-NEXT:    vmov.16 q5[1], r2
-; CHECK-NEXT:    vmov.u16 r2, q2[0]
-; CHECK-NEXT:    vmov.16 q5[2], r2
-; CHECK-NEXT:    vmov.u16 r2, q2[3]
-; CHECK-NEXT:    vmov.16 q5[3], r2
-; CHECK-NEXT:    vmov.u16 r2, q4[7]
-; CHECK-NEXT:    vmov.16 q6[7], r2
-; CHECK-NEXT:    vmov.f32 s22, s11
-; CHECK-NEXT:    vmov.f32 s26, s16
-; CHECK-NEXT:    vmov q7, q6
-; CHECK-NEXT:    vmov r3, s20
-; CHECK-NEXT:    vmovnb.i32 q7, q5
-; CHECK-NEXT:    vmov r12, s27
-; CHECK-NEXT:    vmov r2, s30
-; CHECK-NEXT:    vmov q6[2], q6[0], r3, r2
-; CHECK-NEXT:    vmov r2, s21
-; CHECK-NEXT:    vmov q6[3], q6[1], r2, r12
-; CHECK-NEXT:    vmov.u16 r2, q4[0]
-; CHECK-NEXT:    vmov.16 q5[5], r2
+; CHECK-NEXT:    vmov.16 q4[7], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[4]
+; CHECK-NEXT:    vmov.16 q5[6], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[2]
+; CHECK-NEXT:    vmov.16 q6[0], r2
 ; CHECK-NEXT:    vmov.u16 r2, q2[5]
-; CHECK-NEXT:    vmovx.f16 s23, s17
-; CHECK-NEXT:    vins.f16 s23, s19
-; CHECK-NEXT:    vmovx.f16 s16, s12
-; CHECK-NEXT:    vins.f16 s16, s14
-; CHECK-NEXT:    vmovx.f16 s17, s15
-; CHECK-NEXT:    vins.f16 s17, s9
-; CHECK-NEXT:    vmov q2, q5
-; CHECK-NEXT:    vmov.16 q4[4], r2
-; CHECK-NEXT:    vmovnb.i32 q2, q4
-; CHECK-NEXT:    vmov r3, s16
-; CHECK-NEXT:    vmov r2, s10
-; CHECK-NEXT:    vmov q2[2], q2[0], r3, r2
+; CHECK-NEXT:    vmov.16 q6[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vmov.16 q6[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    vmov.16 q6[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[7]
+; CHECK-NEXT:    vmov.16 q5[7], r2
+; CHECK-NEXT:    vmov.f32 s26, s3
+; CHECK-NEXT:    vmov.f32 s22, s12
+; CHECK-NEXT:    vmov.f32 s7, s19
+; CHECK-NEXT:    vmov q4, q5
+; CHECK-NEXT:    vmovnb.i32 q4, q6
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov r2, s18
+; CHECK-NEXT:    vmov q4[2], q4[0], r3, r2
 ; CHECK-NEXT:    vmov r2, s23
-; CHECK-NEXT:    vmov r3, s17
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #32]
-; CHECK-NEXT:    vmov q2[3], q2[1], r3, r2
-; CHECK-NEXT:    vadd.i16 q1, q1, q2
+; CHECK-NEXT:    vins.f16 s22, s12
+; CHECK-NEXT:    vmov r3, s25
+; CHECK-NEXT:    vmovx.f16 s23, s13
+; CHECK-NEXT:    vmov q4[3], q4[1], r3, r2
+; CHECK-NEXT:    vins.f16 s23, s15
+; CHECK-NEXT:    vmovx.f16 s12, s8
+; CHECK-NEXT:    vins.f16 s12, s10
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    vmovx.f16 s13, s11
 ; CHECK-NEXT:    vldrw.u32 q2, [r0]
-; CHECK-NEXT:    vmov.u16 r0, q5[4]
-; CHECK-NEXT:    vadd.i16 q1, q1, q6
+; CHECK-NEXT:    vins.f16 s13, s1
+; CHECK-NEXT:    vmov q0, q5
+; CHECK-NEXT:    vmov.16 q3[4], r2
+; CHECK-NEXT:    vmovnb.i32 q0, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov q0[2], q0[0], r3, r2
+; CHECK-NEXT:    vmov r2, s23
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #32]
+; CHECK-NEXT:    vmov q0[3], q0[1], r3, r2
 ; CHECK-NEXT:    vmov.u16 r2, q2[2]
-; CHECK-NEXT:    vmov.16 q6[6], r0
+; CHECK-NEXT:    vadd.i16 q0, q1, q0
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
 ; CHECK-NEXT:    vmov.16 q3[0], r2
 ; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.u16 r0, q5[4]
 ; CHECK-NEXT:    vmov.16 q3[1], r2
-; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vmov.u16 r2, q1[0]
+; CHECK-NEXT:    vmov.16 q6[6], r0
 ; CHECK-NEXT:    vmov.u16 r0, q5[7]
 ; CHECK-NEXT:    vmov.16 q3[2], r2
 ; CHECK-NEXT:    vmov.16 q6[7], r0
-; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    vmov.u16 r2, q1[3]
 ; CHECK-NEXT:    vmov.16 q3[3], r2
 ; CHECK-NEXT:    vmov.f32 s26, s20
-; CHECK-NEXT:    vmov.f32 s14, s3
+; CHECK-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-NEXT:    vmov.f32 s14, s7
 ; CHECK-NEXT:    vmov q4, q6
-; CHECK-NEXT:    vmovnb.i32 q4, q3
 ; CHECK-NEXT:    vmov.u16 r2, q2[0]
+; CHECK-NEXT:    vmovnb.i32 q4, q3
+; CHECK-NEXT:    vmov r3, s27
 ; CHECK-NEXT:    vmov r0, s18
 ; CHECK-NEXT:    vmov.16 q4[0], r2
 ; CHECK-NEXT:    vmov.u16 r2, q2[3]
-; CHECK-NEXT:    vmov r3, s27
+; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
 ; CHECK-NEXT:    vmov.16 q4[1], r2
 ; CHECK-NEXT:    vmov.u16 r2, q2[6]
 ; CHECK-NEXT:    vmov.16 q4[2], r2
-; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
 ; CHECK-NEXT:    vmov.16 q4[3], r2
-; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    vmov.u16 r2, q1[4]
 ; CHECK-NEXT:    vmov.16 q4[4], r2
 ; CHECK-NEXT:    vmov.u16 r2, q5[2]
 ; CHECK-NEXT:    vmov.16 q7[6], r2
 ; CHECK-NEXT:    vmov.u16 r2, q5[5]
 ; CHECK-NEXT:    vmov.16 q7[7], r2
-; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
 ; CHECK-NEXT:    vmov.16 q4[5], r2
 ; CHECK-NEXT:    vmov r2, s12
 ; CHECK-NEXT:    vmov q6[2], q6[0], r2, r0
 ; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    vins.f16 s14, s20
 ; CHECK-NEXT:    vmov q6[3], q6[1], r0, r3
-; CHECK-NEXT:    vmov.u16 r0, q5[0]
-; CHECK-NEXT:    vmov.16 q3[5], r0
-; CHECK-NEXT:    vmov.u16 r0, q0[5]
 ; CHECK-NEXT:    vmovx.f16 s15, s21
-; CHECK-NEXT:    vmov.f32 s19, s31
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
 ; CHECK-NEXT:    vins.f16 s15, s23
 ; CHECK-NEXT:    vmovx.f16 s20, s8
 ; CHECK-NEXT:    vins.f16 s20, s10
-; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s19, s31
 ; CHECK-NEXT:    vmovx.f16 s21, s11
-; CHECK-NEXT:    vins.f16 s21, s1
-; CHECK-NEXT:    vmov q0, q3
+; CHECK-NEXT:    vins.f16 s21, s5
+; CHECK-NEXT:    vmov q1, q3
 ; CHECK-NEXT:    vmov.16 q5[4], r0
-; CHECK-NEXT:    vmovnb.i32 q0, q5
+; CHECK-NEXT:    vmovnb.i32 q1, q5
 ; CHECK-NEXT:    vmov r2, s20
-; CHECK-NEXT:    vmov r0, s2
-; CHECK-NEXT:    vmov q0[2], q0[0], r2, r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov q1[2], q1[0], r2, r0
 ; CHECK-NEXT:    vmov r0, s15
 ; CHECK-NEXT:    vmov r2, s21
-; CHECK-NEXT:    vmov q0[3], q0[1], r2, r0
-; CHECK-NEXT:    vadd.i16 q0, q4, q0
-; CHECK-NEXT:    vadd.i16 q0, q0, q6
-; CHECK-NEXT:    vstrw.32 q0, [r1]
+; CHECK-NEXT:    vmov q1[3], q1[1], r2, r0
+; CHECK-NEXT:    vadd.i16 q1, q4, q1
+; CHECK-NEXT:    vadd.i16 q1, q1, q6
+; CHECK-NEXT:    vstrw.32 q1, [r1]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
 entry:
@@ -647,8 +644,6 @@ define void @vld3_v8i8(<24 x i8> *%src, <8 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.16 q2[3], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[13]
 ; CHECK-NEXT:    vmov.16 q2[4], r0
-; CHECK-NEXT:    vmov.u16 r0, q0[0]
-; CHECK-NEXT:    vmov.16 q2[5], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[0]
 ; CHECK-NEXT:    vmov.16 q3[0], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[3]
@@ -660,13 +655,14 @@ define void @vld3_v8i8(<24 x i8> *%src, <8 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.u8 r0, q1[12]
 ; CHECK-NEXT:    vmov.16 q3[4], r0
 ; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vins.f16 s10, s0
 ; CHECK-NEXT:    vmov.16 q3[5], r0
 ; CHECK-NEXT:    vmov.u16 r0, q0[2]
 ; CHECK-NEXT:    vmovx.f16 s11, s1
 ; CHECK-NEXT:    vmov.16 q3[6], r0
 ; CHECK-NEXT:    vmov.u16 r0, q0[5]
-; CHECK-NEXT:    vins.f16 s11, s3
 ; CHECK-NEXT:    vmov.16 q3[7], r0
+; CHECK-NEXT:    vins.f16 s11, s3
 ; CHECK-NEXT:    vmov.u8 r0, q1[2]
 ; CHECK-NEXT:    vadd.i16 q2, q3, q2
 ; CHECK-NEXT:    vmov.16 q3[0], r0

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst3.ll b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
index 815e46818ff7..62fce97c76c9 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
@@ -416,15 +416,14 @@ define void @vst3_v8i16(<8 x i16> *%src, <24 x i16> *%dst) {
 ; CHECK-NEXT:    vmov.16 q5[0], r0
 ; CHECK-NEXT:    vmov.u16 r0, q3[5]
 ; CHECK-NEXT:    vmov.16 q5[1], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[6]
-; CHECK-NEXT:    vmov.16 q5[3], r0
 ; CHECK-NEXT:    vmov.u16 r0, q2[7]
+; CHECK-NEXT:    vins.f16 s21, s11
 ; CHECK-NEXT:    vmov.f32 s17, s25
 ; CHECK-NEXT:    vmov.16 q5[6], r0
 ; CHECK-NEXT:    vmov.u16 r0, q3[7]
 ; CHECK-NEXT:    vmov r2, s7
-; CHECK-NEXT:    vmov.f32 s18, s26
 ; CHECK-NEXT:    vmov.16 q5[7], r0
+; CHECK-NEXT:    vmov.f32 s18, s26
 ; CHECK-NEXT:    vdup.32 q6, r2
 ; CHECK-NEXT:    vmov.f32 s22, s15
 ; CHECK-NEXT:    vmov.u16 r2, q6[2]
@@ -442,16 +441,16 @@ define void @vst3_v8i16(<8 x i16> *%src, <24 x i16> *%dst) {
 ; CHECK-NEXT:    vmov.u16 r0, q2[5]
 ; CHECK-NEXT:    vmov.16 q2[2], r2
 ; CHECK-NEXT:    vmov.u16 r2, q0[3]
-; CHECK-NEXT:    vmov.f32 s21, s29
 ; CHECK-NEXT:    vmov.16 q2[3], r2
 ; CHECK-NEXT:    vmov.u16 r2, q0[4]
 ; CHECK-NEXT:    vmov.16 q2[4], r2
-; CHECK-NEXT:    vmov.f32 s22, s30
+; CHECK-NEXT:    vmov.f32 s21, s29
 ; CHECK-NEXT:    vmov.16 q2[5], r0
-; CHECK-NEXT:    vstrw.32 q5, [r1, #32]
+; CHECK-NEXT:    vmov.f32 s22, s30
 ; CHECK-NEXT:    vmov.f32 s1, s9
-; CHECK-NEXT:    vstrw.32 q4, [r1]
+; CHECK-NEXT:    vstrw.32 q5, [r1, #32]
 ; CHECK-NEXT:    vmov.f32 s2, s10
+; CHECK-NEXT:    vstrw.32 q4, [r1]
 ; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
@@ -476,174 +475,172 @@ define void @vst3_v16i16(<16 x i16> *%src, <48 x i16> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    .pad #160
 ; CHECK-NEXT:    sub sp, #160
-; CHECK-NEXT:    vldrw.u32 q0, [r0, #16]
-; CHECK-NEXT:    vldrw.u32 q7, [r0, #48]
-; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vmov.f64 d4, d0
-; CHECK-NEXT:    vmov.u16 r2, q7[1]
-; CHECK-NEXT:    vstrw.32 q0, [sp, #128] @ 16-byte Spill
-; CHECK-NEXT:    vstrw.32 q1, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT:    vins.f16 s8, s28
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT:    vldrw.u32 q0, [r0, #48]
+; CHECK-NEXT:    vmov.f64 d4, d2
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov q6, q0
+; CHECK-NEXT:    vstrw.32 q1, [sp, #128] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q6, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vins.f16 s8, s0
 ; CHECK-NEXT:    vmov.16 q2[4], r2
-; CHECK-NEXT:    vmov.f32 s11, s1
-; CHECK-NEXT:    vins.f16 s11, s29
-; CHECK-NEXT:    vmov q4, q2
-; CHECK-NEXT:    vmov.f32 s17, s0
+; CHECK-NEXT:    vmov.f32 s11, s5
+; CHECK-NEXT:    vins.f16 s11, s1
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #80]
-; CHECK-NEXT:    vmov.u16 r2, q4[3]
-; CHECK-NEXT:    vstrw.32 q4, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f32 s9, s4
 ; CHECK-NEXT:    vmov r3, s0
-; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vmov q1, q0
 ; CHECK-NEXT:    vdup.32 q0, r3
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
 ; CHECK-NEXT:    vmov.u16 r3, q0[2]
-; CHECK-NEXT:    vmov.16 q3[2], r3
-; CHECK-NEXT:    vmov.16 q3[3], r2
-; CHECK-NEXT:    vmov.u16 r2, q4[4]
-; CHECK-NEXT:    vmov.16 q3[4], r2
+; CHECK-NEXT:    vmov q3, q2
+; CHECK-NEXT:    vstrw.32 q2, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q2[2], r3
+; CHECK-NEXT:    vmov.16 q2[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[4]
+; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT:    vmov.16 q2[4], r2
 ; CHECK-NEXT:    vmov.u16 r2, q0[5]
-; CHECK-NEXT:    vldrw.u32 q0, [r0]
-; CHECK-NEXT:    vmov.16 q3[5], r2
-; CHECK-NEXT:    vmov.u16 r2, q1[1]
-; CHECK-NEXT:    vstrw.32 q3, [sp, #96] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d12, d0
-; CHECK-NEXT:    vstrw.32 q0, [sp, #144] @ 16-byte Spill
-; CHECK-NEXT:    vins.f16 s24, s4
-; CHECK-NEXT:    vmov.16 q6[4], r2
-; CHECK-NEXT:    vmov.f32 s27, s1
-; CHECK-NEXT:    vins.f16 s27, s5
-; CHECK-NEXT:    vmov.f32 s25, s0
 ; CHECK-NEXT:    vldrw.u32 q0, [r0, #64]
-; CHECK-NEXT:    vmov.u16 r2, q6[3]
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vmov q5, q0
+; CHECK-NEXT:    vmov.16 q2[5], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[5]
+; CHECK-NEXT:    vmov.16 q5[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    vmov.16 q5[1], r2
+; CHECK-NEXT:    vstrw.32 q2, [sp, #112] @ 16-byte Spill
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vins.f16 s21, s15
+; CHECK-NEXT:    vmov.u16 r2, q3[7]
+; CHECK-NEXT:    vstrw.32 q0, [sp, #144] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q5[6], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.16 q5[7], r2
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov.f32 s22, s3
 ; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    vstrw.32 q5, [sp, #112] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.u16 r0, q0[2]
-; CHECK-NEXT:    vmov.16 q3[2], r0
-; CHECK-NEXT:    vmov.u16 r0, q6[4]
-; CHECK-NEXT:    vmov.16 q3[3], r2
-; CHECK-NEXT:    vmov.16 q3[4], r0
+; CHECK-NEXT:    vmov.u16 r2, q5[3]
+; CHECK-NEXT:    vmov.16 q4[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[4]
+; CHECK-NEXT:    vmov.16 q4[3], r2
+; CHECK-NEXT:    vstrw.32 q2, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q4[4], r0
 ; CHECK-NEXT:    vmov.u16 r0, q0[5]
-; CHECK-NEXT:    vmov.16 q3[5], r0
-; CHECK-NEXT:    vmov.u16 r0, q1[5]
-; CHECK-NEXT:    vmov.16 q4[0], r0
-; CHECK-NEXT:    vmov.u16 r0, q5[5]
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vmov.16 q4[5], r0
+; CHECK-NEXT:    vmov.f64 d14, d0
+; CHECK-NEXT:    vmov.u16 r0, q3[1]
+; CHECK-NEXT:    vstrw.32 q4, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vmov q4, q1
+; CHECK-NEXT:    vstrw.32 q4, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vins.f16 s28, s12
+; CHECK-NEXT:    vmov.16 q7[4], r0
+; CHECK-NEXT:    vmov.f32 s31, s1
+; CHECK-NEXT:    vins.f16 s31, s13
+; CHECK-NEXT:    vrev32.16 q3, q3
+; CHECK-NEXT:    vmov.f32 s29, s0
 ; CHECK-NEXT:    vldrw.u32 q0, [sp, #144] @ 16-byte Reload
-; CHECK-NEXT:    vmov.16 q4[1], r0
-; CHECK-NEXT:    vmov.u16 r0, q1[6]
-; CHECK-NEXT:    vstrw.32 q3, [sp, #64] @ 16-byte Spill
-; CHECK-NEXT:    vmov.16 q4[3], r0
-; CHECK-NEXT:    vmov.u16 r0, q1[7]
-; CHECK-NEXT:    vmov r2, s3
-; CHECK-NEXT:    vmov.16 q4[6], r0
-; CHECK-NEXT:    vmov.u16 r0, q5[7]
-; CHECK-NEXT:    vdup.32 q0, r2
-; CHECK-NEXT:    vmov.16 q4[7], r0
-; CHECK-NEXT:    vmov.u16 r2, q0[2]
-; CHECK-NEXT:    vmov.f32 s18, s23
-; CHECK-NEXT:    vmov.16 q1[2], r2
-; CHECK-NEXT:    vmov.u16 r0, q4[3]
-; CHECK-NEXT:    vmov q3, q2
-; CHECK-NEXT:    vmov.16 q1[3], r0
-; CHECK-NEXT:    vmov.u16 r0, q4[4]
-; CHECK-NEXT:    vmov.16 q1[4], r0
-; CHECK-NEXT:    vmov.u16 r0, q0[5]
-; CHECK-NEXT:    vmov.16 q1[5], r0
-; CHECK-NEXT:    vmov.u16 r0, q7[5]
-; CHECK-NEXT:    vstrw.32 q1, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vmov.16 q1[0], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[5]
+; CHECK-NEXT:    vmov.u16 r0, q7[3]
 ; CHECK-NEXT:    vstrw.32 q3, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmov.16 q1[1], r0
-; CHECK-NEXT:    vmov.u16 r0, q7[6]
-; CHECK-NEXT:    vmov.16 q1[3], r0
-; CHECK-NEXT:    vmov.u16 r0, q7[7]
-; CHECK-NEXT:    vmov.16 q1[6], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[7]
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #128] @ 16-byte Reload
-; CHECK-NEXT:    vmov.16 q1[7], r0
-; CHECK-NEXT:    vmov.f32 s6, s15
-; CHECK-NEXT:    vmov r2, s11
-; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov r2, s0
 ; CHECK-NEXT:    vdup.32 q0, r2
 ; CHECK-NEXT:    vmov.u16 r2, q0[2]
-; CHECK-NEXT:    vmov.16 q5[2], r2
-; CHECK-NEXT:    vmov.16 q5[3], r0
-; CHECK-NEXT:    vmov.u16 r0, q1[4]
-; CHECK-NEXT:    vmov.16 q5[4], r0
+; CHECK-NEXT:    vmov.16 q2[2], r2
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q7[4]
+; CHECK-NEXT:    vmov.16 q2[4], r0
 ; CHECK-NEXT:    vmov.u16 r0, q0[5]
-; CHECK-NEXT:    vmov.16 q5[5], r0
-; CHECK-NEXT:    vmov.u16 r0, q3[2]
-; CHECK-NEXT:    vrev32.16 q0, q7
-; CHECK-NEXT:    vmov.16 q7[0], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[3]
-; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmov.16 q7[1], r0
-; CHECK-NEXT:    vmov.u16 r0, q3[4]
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vmov.16 q7[6], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[5]
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #112] @ 16-byte Reload
-; CHECK-NEXT:    vldrw.u32 q3, [sp, #144] @ 16-byte Reload
-; CHECK-NEXT:    vrev32.16 q0, q0
-; CHECK-NEXT:    vmov.16 q7[7], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[2]
-; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u16 r0, q6[5]
 ; CHECK-NEXT:    vmov.16 q0[0], r0
-; CHECK-NEXT:    vmov.u16 r0, q3[3]
-; CHECK-NEXT:    vmov.f32 s5, s21
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
 ; CHECK-NEXT:    vmov.16 q0[1], r0
-; CHECK-NEXT:    vmov.u16 r0, q2[4]
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.u16 r0, q6[7]
+; CHECK-NEXT:    vins.f16 s1, s27
+; CHECK-NEXT:    vstrw.32 q2, [sp, #64] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.16 q0[6], r0
-; CHECK-NEXT:    vmov.u16 r0, q3[5]
-; CHECK-NEXT:    vldrw.u32 q3, [sp, #80] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s25, s9
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #128] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.16 q0[7], r0
-; CHECK-NEXT:    vmov.f32 s26, s10
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #96] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s6, s22
-; CHECK-NEXT:    vstrw.32 q6, [r1]
-; CHECK-NEXT:    vmov.f32 s13, s9
-; CHECK-NEXT:    vstrw.32 q1, [r1, #80]
-; CHECK-NEXT:    vmov.f32 s14, s10
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #48] @ 16-byte Reload
-; CHECK-NEXT:    vldrw.u32 q1, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q3, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s17, s9
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #112] @ 16-byte Reload
-; CHECK-NEXT:    vmov.u16 r2, q1[2]
+; CHECK-NEXT:    vmov.f32 s2, s19
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vdup.32 q6, r2
+; CHECK-NEXT:    vmov.u16 r2, q6[2]
+; CHECK-NEXT:    vmov.16 q2[2], r2
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u16 r0, q6[5]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[2]
+; CHECK-NEXT:    vmov.16 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.16 q3[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[4]
+; CHECK-NEXT:    vldrw.u32 q6, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q3[6], r0
 ; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #144] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vrev32.16 q6, q6
+; CHECK-NEXT:    vmov.16 q3[7], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vstrw.32 q6, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q6[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[3]
 ; CHECK-NEXT:    vmov.f32 s1, s9
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #144] @ 16-byte Reload
-; CHECK-NEXT:    vmov.16 q1[2], r2
-; CHECK-NEXT:    vstrw.32 q4, [r1, #32]
+; CHECK-NEXT:    vmov.16 q6[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[4]
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #64] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s2, s10
-; CHECK-NEXT:    vmov.u16 r2, q0[3]
-; CHECK-NEXT:    vmov.16 q1[3], r2
-; CHECK-NEXT:    vmov.u16 r2, q0[4]
-; CHECK-NEXT:    vmov.16 q1[4], r2
-; CHECK-NEXT:    vmov.16 q1[5], r0
-; CHECK-NEXT:    vmov.f32 s1, s5
-; CHECK-NEXT:    vmov.f32 s2, s6
-; CHECK-NEXT:    vldrw.u32 q1, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q0, [r1, #16]
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q6[6], r0
 ; CHECK-NEXT:    vmov.f32 s29, s5
+; CHECK-NEXT:    vmov.u16 r0, q4[5]
+; CHECK-NEXT:    vmov.f32 s30, s6
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q0, [r1, #80]
+; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s21, s5
+; CHECK-NEXT:    vmov.16 q6[7], r0
+; CHECK-NEXT:    vmov.f32 s22, s6
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.f32 s9, s5
+; CHECK-NEXT:    vmov.16 q0[2], r2
+; CHECK-NEXT:    vmov.f32 s10, s6
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #144] @ 16-byte Reload
+; CHECK-NEXT:    vstrw.32 q5, [r1, #32]
+; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s25, s5
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s26, s18
+; CHECK-NEXT:    vstrw.32 q7, [r1]
+; CHECK-NEXT:    vmov.u16 r2, q6[3]
+; CHECK-NEXT:    vmov.f32 s13, s5
+; CHECK-NEXT:    vmov.16 q0[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[4]
+; CHECK-NEXT:    vmov.16 q0[4], r2
 ; CHECK-NEXT:    vldrw.u32 q1, [sp, #128] @ 16-byte Reload
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov.f32 s25, s1
+; CHECK-NEXT:    vmov.f32 s26, s2
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s14, s6
+; CHECK-NEXT:    vstrw.32 q6, [r1, #16]
 ; CHECK-NEXT:    vmov.u16 r2, q0[2]
 ; CHECK-NEXT:    vmov.u16 r0, q0[5]
-; CHECK-NEXT:    vmov.f32 s30, s6
 ; CHECK-NEXT:    vmov.16 q0[2], r2
-; CHECK-NEXT:    vmov.u16 r2, q7[3]
+; CHECK-NEXT:    vmov.u16 r2, q3[3]
 ; CHECK-NEXT:    vmov.16 q0[3], r2
-; CHECK-NEXT:    vmov.u16 r2, q7[4]
+; CHECK-NEXT:    vmov.u16 r2, q3[4]
 ; CHECK-NEXT:    vmov.16 q0[4], r2
 ; CHECK-NEXT:    vmov.16 q0[5], r0
-; CHECK-NEXT:    vmov.f32 s29, s1
-; CHECK-NEXT:    vmov.f32 s30, s2
-; CHECK-NEXT:    vstrw.32 q7, [r1, #64]
+; CHECK-NEXT:    vmov.f32 s13, s1
+; CHECK-NEXT:    vmov.f32 s14, s2
+; CHECK-NEXT:    vstrw.32 q3, [r1, #64]
 ; CHECK-NEXT:    add sp, #160
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    bx lr
@@ -775,20 +772,19 @@ define void @vst3_v8i8(<8 x i8> *%src, <24 x i8> *%dst) {
 ; CHECK-NEXT:    vmov.16 q0[0], r2
 ; CHECK-NEXT:    vmov.u16 r2, q2[5]
 ; CHECK-NEXT:    vmov.16 q0[1], r2
-; CHECK-NEXT:    vmov.u16 r2, q1[6]
-; CHECK-NEXT:    vmov.16 q0[3], r2
 ; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vins.f16 s1, s7
+; CHECK-NEXT:    vmov r0, s15
 ; CHECK-NEXT:    vmov.16 q0[6], r2
 ; CHECK-NEXT:    vmov.u16 r2, q2[7]
-; CHECK-NEXT:    vmov r0, s15
 ; CHECK-NEXT:    vmov.16 q0[7], r2
 ; CHECK-NEXT:    vdup.32 q4, r0
 ; CHECK-NEXT:    vmov.f32 s2, s11
 ; CHECK-NEXT:    vmov.u16 r0, q4[2]
-; CHECK-NEXT:    vmov.u16 r2, q0[3]
 ; CHECK-NEXT:    vmov.16 q5[2], r0
-; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
 ; CHECK-NEXT:    vmov.16 q5[3], r2
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
 ; CHECK-NEXT:    vmov.16 q5[4], r0
 ; CHECK-NEXT:    vmov.u16 r0, q4[5]
 ; CHECK-NEXT:    vmov.16 q5[5], r0


        


More information about the llvm-commits mailing list