[llvm] 6d9d204 - [ARM] VINS f16 pattern

David Green via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 21 05:00:26 PDT 2021


Author: David Green
Date: 2021-03-21T12:00:06Z
New Revision: 6d9d2049c8532457e86a48f602a7e5d5ed2828d3

URL: https://github.com/llvm/llvm-project/commit/6d9d2049c8532457e86a48f602a7e5d5ed2828d3
DIFF: https://github.com/llvm/llvm-project/commit/6d9d2049c8532457e86a48f602a7e5d5ed2828d3.diff

LOG: [ARM] VINS f16 pattern

This adds an extra pattern for inserting an f16 into an odd vector lane
via a VINS. If the dual-insert-lane pattern does not happen to apply,
this can help with some simple cases.

Differential Revision: https://reviews.llvm.org/D95471

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMInstrMVE.td
    llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
    llvm/test/CodeGen/Thumb2/mve-masked-load.ll
    llvm/test/CodeGen/Thumb2/mve-shuffle.ll
    llvm/test/CodeGen/Thumb2/mve-vst3.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index 7d1c9017e3dc..c4830e7351f5 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -1900,8 +1900,13 @@ let Predicates = [HasMVEInt] in {
   def : Pat<(insertelt (v4f32 MQPR:$src1), (f32 SPR:$src2), imm:$lane),
             (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS MQPR:$src1, MQPR)), SPR:$src2, (SSubReg_f32_reg imm:$lane))>;
 
-  def : Pat<(insertelt (v8f16 MQPR:$src1), (f16 HPR:$src2), imm:$lane),
+  def : Pat<(insertelt (v8f16 MQPR:$src1), (f16 HPR:$src2), imm_even:$lane),
             (MVE_VMOV_to_lane_16 MQPR:$src1, (COPY_TO_REGCLASS (f16 HPR:$src2), rGPR), imm:$lane)>;
+  def : Pat<(insertelt (v8f16 MQPR:$src1), (f16 HPR:$src2), imm_odd:$lane),
+            (COPY_TO_REGCLASS (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS MQPR:$src1, MQPR)),
+                                (VINSH (EXTRACT_SUBREG MQPR:$src1, (SSubReg_f16_reg imm_odd:$lane)),
+                                       (COPY_TO_REGCLASS HPR:$src2, SPR)),
+                                (SSubReg_f16_reg imm_odd:$lane)), MQPR)>;
   def : Pat<(extractelt (v8f16 MQPR:$src), imm_even:$lane),
             (EXTRACT_SUBREG MQPR:$src, (SSubReg_f16_reg imm_even:$lane))>;
   def : Pat<(extractelt (v8f16 MQPR:$src), imm_odd:$lane),

diff  --git a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
index 6b053b8fd104..dd8c4f110691 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float16regloops.ll
@@ -1468,19 +1468,19 @@ define void @arm_biquad_cascade_df2T_f16(%struct.arm_biquad_cascade_df2T_instanc
 ; CHECK-NEXT:    @ Parent Loop BB17_3 Depth=1
 ; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    ldrh r7, [r1], #4
-; CHECK-NEXT:    vmov r4, s4
+; CHECK-NEXT:    vmov r3, s4
 ; CHECK-NEXT:    vfma.f16 q2, q3, r7
-; CHECK-NEXT:    ldrh r3, [r1, #-2]
+; CHECK-NEXT:    ldrh r4, [r1, #-2]
 ; CHECK-NEXT:    vmov.u16 r7, q2[0]
 ; CHECK-NEXT:    vfma.f16 q2, q4, r7
-; CHECK-NEXT:    vmov.16 q2[3], r4
-; CHECK-NEXT:    vfma.f16 q2, q5, r3
-; CHECK-NEXT:    vmov.u16 r3, q2[1]
-; CHECK-NEXT:    vfma.f16 q2, q6, r3
-; CHECK-NEXT:    strh r3, [r5, #2]
+; CHECK-NEXT:    vins.f16 s9, s4
+; CHECK-NEXT:    vfma.f16 q2, q5, r4
+; CHECK-NEXT:    vmov.u16 r4, q2[1]
+; CHECK-NEXT:    vfma.f16 q2, q6, r4
+; CHECK-NEXT:    strh r4, [r5, #2]
 ; CHECK-NEXT:    vmov.f32 s8, s9
 ; CHECK-NEXT:    strh r7, [r5], #4
-; CHECK-NEXT:    vmov.16 q2[2], r4
+; CHECK-NEXT:    vmov.16 q2[2], r3
 ; CHECK-NEXT:    le lr, .LBB17_5
 ; CHECK-NEXT:  .LBB17_6: @ %while.end
 ; CHECK-NEXT:    @ in Loop: Header=BB17_3 Depth=1

diff  --git a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
index 9d5e3412946a..02895b0a214c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
@@ -1500,8 +1500,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-LE-NEXT:    ldrh r2, [r0, #2]
 ; CHECK-LE-NEXT:    strh.w r2, [sp, #24]
 ; CHECK-LE-NEXT:    vldr.16 s4, [sp, #24]
-; CHECK-LE-NEXT:    vmov r2, s4
-; CHECK-LE-NEXT:    vmov.16 q0[1], r2
+; CHECK-LE-NEXT:    vins.f16 s0, s4
 ; CHECK-LE-NEXT:    lsls r2, r1, #29
 ; CHECK-LE-NEXT:    bpl .LBB45_3
 ; CHECK-LE-NEXT:  .LBB45_11: @ %cond.load4
@@ -1516,8 +1515,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-LE-NEXT:    ldrh r2, [r0, #6]
 ; CHECK-LE-NEXT:    strh.w r2, [sp, #16]
 ; CHECK-LE-NEXT:    vldr.16 s4, [sp, #16]
-; CHECK-LE-NEXT:    vmov r2, s4
-; CHECK-LE-NEXT:    vmov.16 q0[3], r2
+; CHECK-LE-NEXT:    vins.f16 s1, s4
 ; CHECK-LE-NEXT:    lsls r2, r1, #27
 ; CHECK-LE-NEXT:    bpl .LBB45_5
 ; CHECK-LE-NEXT:  .LBB45_13: @ %cond.load10
@@ -1532,8 +1530,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-LE-NEXT:    ldrh r2, [r0, #10]
 ; CHECK-LE-NEXT:    strh.w r2, [sp, #8]
 ; CHECK-LE-NEXT:    vldr.16 s4, [sp, #8]
-; CHECK-LE-NEXT:    vmov r2, s4
-; CHECK-LE-NEXT:    vmov.16 q0[5], r2
+; CHECK-LE-NEXT:    vins.f16 s2, s4
 ; CHECK-LE-NEXT:    lsls r2, r1, #25
 ; CHECK-LE-NEXT:    bpl .LBB45_7
 ; CHECK-LE-NEXT:  .LBB45_15: @ %cond.load16
@@ -1548,8 +1545,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-LE-NEXT:    ldrh r0, [r0, #14]
 ; CHECK-LE-NEXT:    strh.w r0, [sp]
 ; CHECK-LE-NEXT:    vldr.16 s4, [sp]
-; CHECK-LE-NEXT:    vmov r0, s4
-; CHECK-LE-NEXT:    vmov.16 q0[7], r0
+; CHECK-LE-NEXT:    vins.f16 s3, s4
 ; CHECK-LE-NEXT:    add sp, #40
 ; CHECK-LE-NEXT:    bx lr
 ;
@@ -1614,8 +1610,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    ldrh r0, [r0, #14]
 ; CHECK-BE-NEXT:    strh.w r0, [sp]
 ; CHECK-BE-NEXT:    vldr.16 s0, [sp]
-; CHECK-BE-NEXT:    vmov r0, s0
-; CHECK-BE-NEXT:    vmov.16 q1[7], r0
+; CHECK-BE-NEXT:    vins.f16 s7, s0
 ; CHECK-BE-NEXT:  .LBB45_9: @ %else20
 ; CHECK-BE-NEXT:    vrev64.16 q0, q1
 ; CHECK-BE-NEXT:    add sp, #40
@@ -1630,8 +1625,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    ldrh r2, [r0, #2]
 ; CHECK-BE-NEXT:    strh.w r2, [sp, #24]
 ; CHECK-BE-NEXT:    vldr.16 s0, [sp, #24]
-; CHECK-BE-NEXT:    vmov r2, s0
-; CHECK-BE-NEXT:    vmov.16 q1[1], r2
+; CHECK-BE-NEXT:    vins.f16 s4, s0
 ; CHECK-BE-NEXT:    lsls r2, r1, #26
 ; CHECK-BE-NEXT:    bpl .LBB45_3
 ; CHECK-BE-NEXT:  .LBB45_12: @ %cond.load4
@@ -1646,8 +1640,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    ldrh r2, [r0, #6]
 ; CHECK-BE-NEXT:    strh.w r2, [sp, #16]
 ; CHECK-BE-NEXT:    vldr.16 s0, [sp, #16]
-; CHECK-BE-NEXT:    vmov r2, s0
-; CHECK-BE-NEXT:    vmov.16 q1[3], r2
+; CHECK-BE-NEXT:    vins.f16 s5, s0
 ; CHECK-BE-NEXT:    lsls r2, r1, #28
 ; CHECK-BE-NEXT:    bpl .LBB45_5
 ; CHECK-BE-NEXT:  .LBB45_14: @ %cond.load10
@@ -1662,8 +1655,7 @@ define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest,
 ; CHECK-BE-NEXT:    ldrh r2, [r0, #10]
 ; CHECK-BE-NEXT:    strh.w r2, [sp, #8]
 ; CHECK-BE-NEXT:    vldr.16 s0, [sp, #8]
-; CHECK-BE-NEXT:    vmov r2, s0
-; CHECK-BE-NEXT:    vmov.16 q1[5], r2
+; CHECK-BE-NEXT:    vins.f16 s6, s0
 ; CHECK-BE-NEXT:    lsls r2, r1, #30
 ; CHECK-BE-NEXT:    bpl .LBB45_7
 ; CHECK-BE-NEXT:  .LBB45_16: @ %cond.load16

diff  --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index db8f7018ba55..415ce651b5ca 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -1319,8 +1319,7 @@ entry:
 define arm_aapcs_vfpcc <8 x half> @oneoff21_f16(<8 x half> %src1, <8 x half> %src2) {
 ; CHECK-LABEL: oneoff21_f16:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vins.f16 s5, s0
 ; CHECK-NEXT:    vmov q0, q1
 ; CHECK-NEXT:    bx lr
 entry:

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vst3.ll b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
index c1367ea819a9..f569ddb2de91 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst3.ll
@@ -1392,63 +1392,61 @@ define void @vst3_v8f16(<8 x half> *%src, <24 x half> *%dst) {
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14}
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14}
-; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vldrw.u32 q3, [r0]
 ; CHECK-NEXT:    vldrw.u32 q5, [r0, #16]
-; CHECK-NEXT:    vmov.f64 d0, d4
+; CHECK-NEXT:    vmov.f64 d0, d6
 ; CHECK-NEXT:    vmovx.f16 s6, s20
-; CHECK-NEXT:    vmovx.f16 s12, s8
-; CHECK-NEXT:    vmovx.f16 s24, s23
-; CHECK-NEXT:    vmov.f32 s4, s9
+; CHECK-NEXT:    vmovx.f16 s8, s12
+; CHECK-NEXT:    vmov.f32 s4, s13
 ; CHECK-NEXT:    vins.f16 s0, s20
 ; CHECK-NEXT:    vmov r2, s6
 ; CHECK-NEXT:    vins.f16 s4, s21
 ; CHECK-NEXT:    vmov.16 q0[4], r2
 ; CHECK-NEXT:    vmov.f32 s3, s4
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
-; CHECK-NEXT:    vmov.f32 s1, s8
+; CHECK-NEXT:    vmov.f32 s1, s12
 ; CHECK-NEXT:    vmov.f32 s17, s4
-; CHECK-NEXT:    vmovx.f16 s26, s6
+; CHECK-NEXT:    vmovx.f16 s24, s7
 ; CHECK-NEXT:    vmov.f32 s18, s4
-; CHECK-NEXT:    vins.f16 s17, s12
-; CHECK-NEXT:    vmovx.f16 s12, s18
-; CHECK-NEXT:    vins.f16 s2, s12
-; CHECK-NEXT:    vmovx.f16 s12, s7
-; CHECK-NEXT:    vins.f16 s24, s12
-; CHECK-NEXT:    vmovx.f16 s12, s22
-; CHECK-NEXT:    vmov r0, s23
-; CHECK-NEXT:    vins.f16 s12, s26
-; CHECK-NEXT:    vmov.16 q3[3], r0
-; CHECK-NEXT:    vrev32.16 q5, q5
-; CHECK-NEXT:    vmov.f32 s15, s24
-; CHECK-NEXT:    vmov.f32 s25, s11
-; CHECK-NEXT:    vmov.f32 s14, s7
-; CHECK-NEXT:    vmovx.f16 s28, s13
-; CHECK-NEXT:    vmov.f32 s26, s11
+; CHECK-NEXT:    vins.f16 s17, s8
+; CHECK-NEXT:    vmovx.f16 s8, s18
+; CHECK-NEXT:    vins.f16 s2, s8
+; CHECK-NEXT:    vmovx.f16 s11, s23
+; CHECK-NEXT:    vins.f16 s11, s24
+; CHECK-NEXT:    vmovx.f16 s24, s6
+; CHECK-NEXT:    vmovx.f16 s8, s22
+; CHECK-NEXT:    vmov.f32 s18, s2
+; CHECK-NEXT:    vins.f16 s8, s24
+; CHECK-NEXT:    vmov.f32 s25, s15
+; CHECK-NEXT:    vins.f16 s9, s23
+; CHECK-NEXT:    vmov.f32 s26, s15
+; CHECK-NEXT:    vmov.f32 s10, s7
+; CHECK-NEXT:    vmovx.f16 s28, s9
 ; CHECK-NEXT:    vins.f16 s25, s28
 ; CHECK-NEXT:    vmovx.f16 s28, s26
-; CHECK-NEXT:    vins.f16 s14, s28
-; CHECK-NEXT:    vmovx.f16 s28, s9
+; CHECK-NEXT:    vins.f16 s10, s28
+; CHECK-NEXT:    vmovx.f16 s28, s13
 ; CHECK-NEXT:    vmov.f32 s4, s5
+; CHECK-NEXT:    vrev32.16 q5, q5
 ; CHECK-NEXT:    vins.f16 s4, s28
-; CHECK-NEXT:    vmovx.f16 s28, s10
+; CHECK-NEXT:    vmovx.f16 s28, s14
 ; CHECK-NEXT:    vins.f16 s6, s28
-; CHECK-NEXT:    vmov.f32 s18, s2
+; CHECK-NEXT:    vmov.f32 s26, s10
 ; CHECK-NEXT:    vmov.f32 s7, s6
-; CHECK-NEXT:    vmov.f32 s6, s10
-; CHECK-NEXT:    vmovx.f16 s8, s5
-; CHECK-NEXT:    vins.f16 s21, s8
-; CHECK-NEXT:    vmovx.f16 s8, s22
-; CHECK-NEXT:    vins.f16 s6, s8
-; CHECK-NEXT:    vmov.f32 s26, s14
-; CHECK-NEXT:    vmov.f32 s22, s6
+; CHECK-NEXT:    vmov.f32 s6, s14
+; CHECK-NEXT:    vmovx.f16 s12, s5
+; CHECK-NEXT:    vins.f16 s21, s12
+; CHECK-NEXT:    vmovx.f16 s12, s22
+; CHECK-NEXT:    vins.f16 s6, s12
 ; CHECK-NEXT:    vmov.f32 s1, s17
-; CHECK-NEXT:    vmov.f32 s13, s25
+; CHECK-NEXT:    vmov.f32 s22, s6
+; CHECK-NEXT:    vmov.f32 s9, s25
 ; CHECK-NEXT:    vmov.f32 s5, s21
 ; CHECK-NEXT:    vmov.f32 s2, s18
+; CHECK-NEXT:    vmov.f32 s10, s26
 ; CHECK-NEXT:    vstrw.32 q0, [r1]
-; CHECK-NEXT:    vmov.f32 s14, s26
+; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
 ; CHECK-NEXT:    vmov.f32 s6, s22
-; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
 ; CHECK-NEXT:    vstrw.32 q1, [r1, #16]
 ; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14}
 ; CHECK-NEXT:    bx lr
@@ -1473,146 +1471,142 @@ define void @vst3_v16f16(<16 x half> *%src, <48 x half> *%dst) {
 ; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
 ; CHECK-NEXT:    .pad #128
 ; CHECK-NEXT:    sub sp, #128
-; CHECK-NEXT:    vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT:    vldrw.u32 q1, [r0, #32]
 ; CHECK-NEXT:    vldrw.u32 q7, [r0, #64]
-; CHECK-NEXT:    vldrw.u32 q4, [r0]
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #48]
+; CHECK-NEXT:    vldrw.u32 q5, [r0, #80]
 ; CHECK-NEXT:    vmovx.f16 s0, s31
-; CHECK-NEXT:    vmovx.f16 s2, s15
-; CHECK-NEXT:    vins.f16 s2, s0
+; CHECK-NEXT:    vmovx.f16 s11, s7
+; CHECK-NEXT:    vins.f16 s11, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s30
-; CHECK-NEXT:    vmovx.f16 s4, s14
-; CHECK-NEXT:    vmov r2, s15
-; CHECK-NEXT:    vins.f16 s4, s0
-; CHECK-NEXT:    vmov q6, q5
-; CHECK-NEXT:    vmov.16 q1[3], r2
-; CHECK-NEXT:    vstrw.32 q3, [sp, #32] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f32 s7, s2
-; CHECK-NEXT:    vmovx.f16 s2, s20
-; CHECK-NEXT:    vmov.f32 s6, s31
-; CHECK-NEXT:    vmovx.f16 s0, s5
-; CHECK-NEXT:    vmov q2, q1
-; CHECK-NEXT:    vmov.f32 s5, s19
-; CHECK-NEXT:    vmov.f32 s6, s19
-; CHECK-NEXT:    vstrw.32 q4, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmovx.f16 s8, s6
+; CHECK-NEXT:    vmov q4, q1
+; CHECK-NEXT:    vins.f16 s8, s0
+; CHECK-NEXT:    vstrw.32 q4, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vins.f16 s9, s7
+; CHECK-NEXT:    vmov.f32 s10, s31
+; CHECK-NEXT:    vmovx.f16 s0, s9
+; CHECK-NEXT:    vmov q3, q2
+; CHECK-NEXT:    vldrw.u32 q2, [r0]
+; CHECK-NEXT:    vmov.f32 s5, s11
+; CHECK-NEXT:    vmov q6, q2
+; CHECK-NEXT:    vmov.f32 s6, s11
+; CHECK-NEXT:    vldrw.u32 q2, [r0, #48]
 ; CHECK-NEXT:    vins.f16 s5, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s6
 ; CHECK-NEXT:    vstrw.32 q1, [sp, #64] @ 16-byte Spill
 ; CHECK-NEXT:    vldrw.u32 q1, [r0, #16]
-; CHECK-NEXT:    vins.f16 s10, s0
-; CHECK-NEXT:    vmov r2, s2
-; CHECK-NEXT:    vstrw.32 q2, [sp, #48] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d4, d2
-; CHECK-NEXT:    vstrw.32 q1, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vmovx.f16 s2, s12
-; CHECK-NEXT:    vins.f16 s8, s20
+; CHECK-NEXT:    vins.f16 s14, s0
+; CHECK-NEXT:    vmovx.f16 s2, s8
+; CHECK-NEXT:    vstrw.32 q3, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.f64 d6, d2
+; CHECK-NEXT:    vstrw.32 q1, [sp, #80] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q6, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q2, [sp] @ 16-byte Spill
 ; CHECK-NEXT:    vmov.f32 s0, s5
-; CHECK-NEXT:    vins.f16 s0, s21
-; CHECK-NEXT:    vmov.16 q2[4], r2
-; CHECK-NEXT:    vldrw.u32 q5, [r0, #80]
-; CHECK-NEXT:    vmov.f32 s11, s0
-; CHECK-NEXT:    vmov.f32 s9, s4
+; CHECK-NEXT:    vins.f16 s12, s8
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vins.f16 s0, s9
+; CHECK-NEXT:    vmov.16 q3[4], r2
+; CHECK-NEXT:    vmovx.f16 s2, s16
+; CHECK-NEXT:    vmov.f32 s15, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s4
+; CHECK-NEXT:    vmov.f32 s13, s4
 ; CHECK-NEXT:    vmov.f32 s5, s20
 ; CHECK-NEXT:    vmov.f32 s6, s20
 ; CHECK-NEXT:    vins.f16 s5, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s6
 ; CHECK-NEXT:    vstrw.32 q1, [sp, #112] @ 16-byte Spill
-; CHECK-NEXT:    vmov.f64 d2, d8
-; CHECK-NEXT:    vins.f16 s10, s0
-; CHECK-NEXT:    vstrw.32 q2, [sp, #96] @ 16-byte Spill
-; CHECK-NEXT:    vmov q2, q6
-; CHECK-NEXT:    vmovx.f16 s24, s10
-; CHECK-NEXT:    vmov.f32 s0, s17
-; CHECK-NEXT:    vins.f16 s4, s12
+; CHECK-NEXT:    vmov q1, q6
+; CHECK-NEXT:    vins.f16 s14, s0
+; CHECK-NEXT:    vmov.f32 s0, s5
+; CHECK-NEXT:    vins.f16 s24, s16
 ; CHECK-NEXT:    vmov r0, s2
-; CHECK-NEXT:    vins.f16 s0, s13
-; CHECK-NEXT:    vmov.16 q1[4], r0
-; CHECK-NEXT:    vmov.f32 s13, s28
-; CHECK-NEXT:    vmov.f32 s7, s0
-; CHECK-NEXT:    vmovx.f16 s0, s16
-; CHECK-NEXT:    vmov.f32 s14, s28
-; CHECK-NEXT:    vmovx.f16 s2, s11
-; CHECK-NEXT:    vins.f16 s13, s0
-; CHECK-NEXT:    vmov.f32 s5, s16
-; CHECK-NEXT:    vmovx.f16 s0, s14
-; CHECK-NEXT:    vmov r0, s11
-; CHECK-NEXT:    vins.f16 s6, s0
+; CHECK-NEXT:    vins.f16 s0, s17
+; CHECK-NEXT:    vmov.16 q6[4], r0
+; CHECK-NEXT:    vmov.f32 s27, s0
+; CHECK-NEXT:    vmovx.f16 s0, s4
+; CHECK-NEXT:    vmov.f32 s25, s4
+; CHECK-NEXT:    vmov.f32 s5, s28
+; CHECK-NEXT:    vmov.f32 s6, s28
+; CHECK-NEXT:    vins.f16 s5, s0
+; CHECK-NEXT:    vmovx.f16 s0, s6
+; CHECK-NEXT:    vstrw.32 q1, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vins.f16 s26, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s22
-; CHECK-NEXT:    vins.f16 s24, s0
-; CHECK-NEXT:    vstrw.32 q1, [sp, #80] @ 16-byte Spill
-; CHECK-NEXT:    vmov q1, q2
+; CHECK-NEXT:    vmovx.f16 s4, s10
+; CHECK-NEXT:    vins.f16 s4, s0
 ; CHECK-NEXT:    vmovx.f16 s0, s23
-; CHECK-NEXT:    vldrw.u32 q2, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vins.f16 s2, s0
-; CHECK-NEXT:    vmov.16 q6[3], r0
-; CHECK-NEXT:    vrev32.16 q1, q1
-; CHECK-NEXT:    vmov.f32 s27, s2
+; CHECK-NEXT:    vmovx.f16 s7, s11
+; CHECK-NEXT:    vmov.f32 s28, s29
+; CHECK-NEXT:    vins.f16 s7, s0
+; CHECK-NEXT:    vins.f16 s5, s11
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s6, s23
+; CHECK-NEXT:    vmovx.f16 s16, s5
 ; CHECK-NEXT:    vmov.f32 s1, s11
-; CHECK-NEXT:    vmov.f32 s26, s23
-; CHECK-NEXT:    vmovx.f16 s16, s25
 ; CHECK-NEXT:    vmov.f32 s2, s11
 ; CHECK-NEXT:    vins.f16 s1, s16
 ; CHECK-NEXT:    vmovx.f16 s16, s2
-; CHECK-NEXT:    vins.f16 s26, s16
+; CHECK-NEXT:    vins.f16 s6, s16
 ; CHECK-NEXT:    vmovx.f16 s16, s9
 ; CHECK-NEXT:    vmov.f32 s20, s21
 ; CHECK-NEXT:    vins.f16 s20, s16
 ; CHECK-NEXT:    vmovx.f16 s16, s10
 ; CHECK-NEXT:    vins.f16 s22, s16
-; CHECK-NEXT:    vmov.f32 s2, s26
+; CHECK-NEXT:    vldrw.u32 q2, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #80] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s23, s22
-; CHECK-NEXT:    vmov.f32 s22, s10
+; CHECK-NEXT:    vrev32.16 q2, q2
+; CHECK-NEXT:    vmov.f32 s2, s6
+; CHECK-NEXT:    vmov.f32 s22, s18
 ; CHECK-NEXT:    vmovx.f16 s16, s21
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #96] @ 16-byte Reload
-; CHECK-NEXT:    vins.f16 s5, s16
+; CHECK-NEXT:    vins.f16 s9, s16
 ; CHECK-NEXT:    vldrw.u32 q4, [sp, #112] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s25, s1
-; CHECK-NEXT:    vmov.f32 s18, s10
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s18, s14
+; CHECK-NEXT:    vstrw.32 q2, [sp, #80] @ 16-byte Spill
 ; CHECK-NEXT:    vstrw.32 q4, [sp, #112] @ 16-byte Spill
-; CHECK-NEXT:    vmovx.f16 s16, s6
-; CHECK-NEXT:    vmov.f32 s14, s10
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #96] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s18, s26
+; CHECK-NEXT:    vstrw.32 q4, [sp, #96] @ 16-byte Spill
+; CHECK-NEXT:    vmovx.f16 s16, s10
 ; CHECK-NEXT:    vins.f16 s22, s16
 ; CHECK-NEXT:    vldrw.u32 q4, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT:    vstrw.32 q1, [sp] @ 16-byte Spill
-; CHECK-NEXT:    vldrw.u32 q1, [sp, #80] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s28, s29
+; CHECK-NEXT:    vmov.f32 s5, s1
 ; CHECK-NEXT:    vmovx.f16 s8, s17
-; CHECK-NEXT:    vmov.f32 s26, s2
-; CHECK-NEXT:    vmov.f32 s5, s13
+; CHECK-NEXT:    vmov.f32 s6, s2
 ; CHECK-NEXT:    vins.f16 s28, s8
 ; CHECK-NEXT:    vmovx.f16 s0, s18
-; CHECK-NEXT:    vldrw.u32 q2, [sp, #96] @ 16-byte Reload
 ; CHECK-NEXT:    vins.f16 s30, s0
-; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s6, s14
-; CHECK-NEXT:    vldrw.u32 q3, [sp, #112] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #48] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s31, s30
-; CHECK-NEXT:    vrev32.16 q0, q0
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #32] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s30, s18
 ; CHECK-NEXT:    vmovx.f16 s16, s29
-; CHECK-NEXT:    vmov.f32 s9, s13
+; CHECK-NEXT:    vrev32.16 q0, q0
+; CHECK-NEXT:    vstrw.32 q1, [r1, #80]
 ; CHECK-NEXT:    vins.f16 s1, s16
-; CHECK-NEXT:    vmov.f32 s10, s14
 ; CHECK-NEXT:    vmovx.f16 s16, s2
-; CHECK-NEXT:    vldrw.u32 q3, [sp, #48] @ 16-byte Reload
 ; CHECK-NEXT:    vins.f16 s30, s16
-; CHECK-NEXT:    vldrw.u32 q4, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #96] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s2, s30
-; CHECK-NEXT:    vmov.f32 s18, s14
-; CHECK-NEXT:    vstrw.32 q2, [r1, #48]
-; CHECK-NEXT:    vmov.f32 s13, s17
-; CHECK-NEXT:    vstrw.32 q6, [r1, #80]
+; CHECK-NEXT:    vmov.f32 s25, s17
+; CHECK-NEXT:    vmov.f32 s26, s18
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #112] @ 16-byte Reload
 ; CHECK-NEXT:    vmov.f32 s29, s1
-; CHECK-NEXT:    vstrw.32 q1, [r1]
-; CHECK-NEXT:    vmov.f32 s30, s2
+; CHECK-NEXT:    vstrw.32 q6, [r1]
+; CHECK-NEXT:    vmov.f32 s13, s17
 ; CHECK-NEXT:    vmov.f32 s14, s18
-; CHECK-NEXT:    vldrw.u32 q4, [sp] @ 16-byte Reload
-; CHECK-NEXT:    vmov.f32 s18, s22
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s18, s10
+; CHECK-NEXT:    vstrw.32 q3, [r1, #48]
+; CHECK-NEXT:    vmov.f32 s9, s17
+; CHECK-NEXT:    vmov.f32 s30, s2
 ; CHECK-NEXT:    vstrw.32 q7, [r1, #16]
+; CHECK-NEXT:    vmov.f32 s10, s18
+; CHECK-NEXT:    vldrw.u32 q4, [sp, #80] @ 16-byte Reload
+; CHECK-NEXT:    vmov.f32 s18, s22
+; CHECK-NEXT:    vstrw.32 q2, [r1, #32]
 ; CHECK-NEXT:    vmov.f32 s21, s17
-; CHECK-NEXT:    vstrw.32 q3, [r1, #32]
 ; CHECK-NEXT:    vmov.f32 s22, s18
 ; CHECK-NEXT:    vstrw.32 q5, [r1, #64]
 ; CHECK-NEXT:    add sp, #128


        


More information about the llvm-commits mailing list