[llvm] r372719 - [ARM] MVE sext and widen/narrow tests from larger types. NFC

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Tue Sep 24 03:39:58 PDT 2019


Author: dmgreen
Date: Tue Sep 24 03:39:58 2019
New Revision: 372719

URL: http://llvm.org/viewvc/llvm-project?rev=372719&view=rev
Log:
[ARM] MVE sext and widen/narrow tests from larger types. NFC

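For reference, the added cases exercise sign/zero extends, truncates, and widening/narrowing loads and stores whose result spans more than a single 128-bit MVE q register. A representative case, taken verbatim from the new sext tests in the diff below:

  define arm_aapcs_vfpcc <8 x i32> @sext_v8i16_v8i32(<8 x i16> %src) {
  entry:
    %0 = sext <8 x i16> %src to <8 x i32>
    ret <8 x i32> %0
  }
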
Modified:
    llvm/trunk/test/CodeGen/Thumb2/mve-sext.ll
    llvm/trunk/test/CodeGen/Thumb2/mve-widen-narrow.ll

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-sext.ll?rev=372719&r1=372718&r2=372719&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-sext.ll (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-sext.ll Tue Sep 24 03:39:58 2019
@@ -32,6 +32,131 @@ entry:
   ret <4 x i32> %0
 }
 
+define arm_aapcs_vfpcc <16 x i16> @sext_v16i8_v16i16(<16 x i8> %src) {
+; CHECK-LABEL: sext_v16i8_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vmovlb.s8 q2, q1
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = sext <16 x i8> %src to <16 x i16>
+  ret <16 x i16> %0
+}
+
+define arm_aapcs_vfpcc <8 x i32> @sext_v8i16_v8i32(<8 x i16> %src) {
+; CHECK-LABEL: sext_v8i16_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmovlb.s16 q2, q1
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = sext <8 x i16> %src to <8 x i32>
+  ret <8 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i32> @sext_v16i8_v16i32(<16 x i8> %src) {
+; CHECK-LABEL: sext_v16i8_v16i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q4, q1
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmovlb.s8 q0, q3
+; CHECK-NEXT:    vmovlb.s16 q3, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmov q0, q4
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = sext <16 x i8> %src to <16 x i32>
+  ret <16 x i32> %0
+}
+
 define arm_aapcs_vfpcc <2 x i64> @sext_v2i32_v2i64(<2 x i32> %src) {
 ; CHECK-LABEL: sext_v2i32_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
@@ -82,16 +207,138 @@ entry:
   ret <4 x i32> %0
 }
 
+define arm_aapcs_vfpcc <16 x i16> @zext_v16i8_v16i16(<16 x i8> %src) {
+; CHECK-LABEL: zext_v16i8_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vmovlb.u8 q2, q1
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext <16 x i8> %src to <16 x i16>
+  ret <16 x i16> %0
+}
+
+define arm_aapcs_vfpcc <8 x i32> @zext_v8i16_v8i32(<8 x i16> %src) {
+; CHECK-LABEL: zext_v8i16_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmovlb.u16 q2, q1
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov q0, q2
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext <8 x i16> %src to <8 x i32>
+  ret <8 x i32> %0
+}
+
+define arm_aapcs_vfpcc <16 x i32> @zext_v16i8_v16i32(<16 x i8> %src) {
+; CHECK-LABEL: zext_v16i8_v16i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vand q4, q1, q3
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vand q1, q1, q3
+; CHECK-NEXT:    vand q2, q2, q3
+; CHECK-NEXT:    vand q3, q5, q3
+; CHECK-NEXT:    vmov q0, q4
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = zext <16 x i8> %src to <16 x i32>
+  ret <16 x i32> %0
+}
+
 define arm_aapcs_vfpcc <2 x i64> @zext_v2i32_v2i64(<2 x i32> %src) {
 ; CHECK-LABEL: zext_v2i32_v2i64:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    adr r0, .LCPI7_0
+; CHECK-NEXT:    adr r0, .LCPI13_0
 ; CHECK-NEXT:    vldrw.u32 q1, [r0]
 ; CHECK-NEXT:    vand q0, q0, q1
 ; CHECK-NEXT:    bx lr
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:  .LCPI13_0:
 ; CHECK-NEXT:    .long 4294967295 @ 0xffffffff
 ; CHECK-NEXT:    .long 0 @ 0x0
 ; CHECK-NEXT:    .long 4294967295 @ 0xffffffff
@@ -129,6 +376,119 @@ entry:
   ret <4 x i8> %0
 }
 
+define arm_aapcs_vfpcc <16 x i8> @trunc_v16i16_v16i8(<16 x i16> %src) {
+; CHECK-LABEL: trunc_v16i16_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.8 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[1]
+; CHECK-NEXT:    vmov.8 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[2]
+; CHECK-NEXT:    vmov.8 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.8 q0[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[4]
+; CHECK-NEXT:    vmov.8 q0[4], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[5]
+; CHECK-NEXT:    vmov.8 q0[5], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[6]
+; CHECK-NEXT:    vmov.8 q0[6], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[7]
+; CHECK-NEXT:    vmov.8 q0[7], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-NEXT:    vmov.8 q0[8], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[1]
+; CHECK-NEXT:    vmov.8 q0[9], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.8 q0[10], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.8 q0[11], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[4]
+; CHECK-NEXT:    vmov.8 q0[12], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vmov.8 q0[13], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.8 q0[14], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.8 q0[15], r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = trunc <16 x i16> %src to <16 x i8>
+  ret <16 x i8> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @trunc_v8i32_v8i16(<8 x i32> %src) {
+; CHECK-LABEL: trunc_v8i32_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    vmov.16 q0[1], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = trunc <8 x i32> %src to <8 x i16>
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @trunc_v16i32_v16i8(<16 x i32> %src) {
+; CHECK-LABEL: trunc_v16i32_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov q4, q0
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vmov.8 q0[0], r0
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    vmov.8 q0[1], r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vmov.8 q0[2], r0
+; CHECK-NEXT:    vmov r0, s19
+; CHECK-NEXT:    vmov.8 q0[3], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.8 q0[4], r0
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov.8 q0[5], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.8 q0[6], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    vmov.8 q0[7], r0
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov.8 q0[8], r0
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    vmov.8 q0[9], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    vmov.8 q0[10], r0
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov.8 q0[11], r0
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov.8 q0[12], r0
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    vmov.8 q0[13], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov.8 q0[14], r0
+; CHECK-NEXT:    vmov r0, s15
+; CHECK-NEXT:    vmov.8 q0[15], r0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = trunc <16 x i32> %src to <16 x i8>
+  ret <16 x i8> %0
+}
+
 define arm_aapcs_vfpcc <2 x i32> @trunc_v2i64_v2i32(<2 x i64> %src) {
 ; CHECK-LABEL: trunc_v2i64_v2i32:
 ; CHECK:       @ %bb.0: @ %entry

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-widen-narrow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-widen-narrow.ll?rev=372719&r1=372718&r2=372719&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-widen-narrow.ll (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-widen-narrow.ll Tue Sep 24 03:39:58 2019
@@ -41,6 +41,57 @@ entry:
   ret void
 }
 
+
+define void @foo_int8_int32_double(<16 x i8>* %dest, <16 x i32>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int8_int32_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vldrw.u32 q1, [r1, #16]
+; CHECK-NEXT:    vldrw.u32 q2, [r1, #32]
+; CHECK-NEXT:    vldrw.u32 q3, [r1, #48]
+; CHECK-NEXT:    vstrb.32 q1, [r0, #4]
+; CHECK-NEXT:    vstrb.32 q0, [r0]
+; CHECK-NEXT:    vstrb.32 q3, [r0, #12]
+; CHECK-NEXT:    vstrb.32 q2, [r0, #8]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i32>, <16 x i32>* %src, align 4
+  %0 = trunc <16 x i32> %wide.load to <16 x i8>
+  store <16 x i8> %0, <16 x i8>* %dest, align 1
+  ret void
+}
+
+define void @foo_int16_int32_double(<8 x i16>* %dest, <8 x i32>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int16_int32_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r1]
+; CHECK-NEXT:    vldrw.u32 q1, [r1, #16]
+; CHECK-NEXT:    vstrh.32 q1, [r0, #8]
+; CHECK-NEXT:    vstrh.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <8 x i32>, <8 x i32>* %src, align 4
+  %0 = trunc <8 x i32> %wide.load to <8 x i16>
+  store <8 x i16> %0, <8 x i16>* %dest, align 2
+  ret void
+}
+
+define void @foo_int8_int16_double(<16 x i8>* %dest, <16 x i16>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int8_int16_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    vldrh.u16 q1, [r1, #16]
+; CHECK-NEXT:    vstrb.16 q1, [r0, #8]
+; CHECK-NEXT:    vstrb.16 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i16>, <16 x i16>* %src, align 2
+  %0 = trunc <16 x i16> %wide.load to <16 x i8>
+  store <16 x i8> %0, <16 x i8>* %dest, align 1
+  ret void
+}
+
+
 define void @foo_int32_int8(<4 x i32>* %dest, <4 x i8>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_int32_int8:
 ; CHECK:       @ %bb.0: @ %entry
@@ -80,6 +131,143 @@ entry:
   ret void
 }
 
+define void @foo_int32_int8_double(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int32_int8_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #48]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #32]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmovlb.s8 q0, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %0 = sext <16 x i8> %wide.load to <16 x i32>
+  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  ret void
+}
+
+define void @foo_int16_int8_double(<16 x i16>* %dest, <16 x i8>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int16_int8_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vstrh.16 q1, [r0, #16]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vmovlb.s8 q0, q1
+; CHECK-NEXT:    vstrh.16 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %0 = sext <16 x i8> %wide.load to <16 x i16>
+  store <16 x i16> %0, <16 x i16>* %dest, align 2
+  ret void
+}
+
+define void @foo_int32_int16_double(<8 x i32>* %dest, <8 x i16>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int32_int16_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmovlb.s16 q0, q1
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <8 x i16>, <8 x i16>* %src, align 2
+  %0 = sext <8 x i16> %wide.load to <8 x i32>
+  store <8 x i32> %0, <8 x i32>* %dest, align 4
+  ret void
+}
+
+
 define void @foo_uint32_uint8(<4 x i32>* %dest, <4 x i8>* readonly %src, i32 %n) {
 ; CHECK-LABEL: foo_uint32_uint8:
 ; CHECK:       @ %bb.0: @ %entry
@@ -120,6 +308,315 @@ entry:
 }
 
 
+define void @foo_uint32_uint8_double(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_uint32_uint8_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vand q2, q2, q1
+; CHECK-NEXT:    vstrw.32 q2, [r0, #48]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vand q2, q2, q1
+; CHECK-NEXT:    vstrw.32 q2, [r0, #32]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vand q2, q2, q1
+; CHECK-NEXT:    vstrw.32 q2, [r0, #16]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %0 = zext <16 x i8> %wide.load to <16 x i32>
+  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  ret void
+}
+
+define void @foo_uint16_uint8_double(<16 x i16>* %dest, <16 x i8>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_uint16_uint8_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vstrh.16 q1, [r0, #16]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vmovlb.u8 q0, q1
+; CHECK-NEXT:    vstrh.16 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %0 = zext <16 x i8> %wide.load to <16 x i16>
+  store <16 x i16> %0, <16 x i16>* %dest, align 2
+  ret void
+}
+
+define void @foo_uint32_uint16_double(<8 x i32>* %dest, <8 x i16>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_uint32_uint16_double:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u16 q0, [r1]
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q1
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <8 x i16>, <8 x i16>* %src, align 2
+  %0 = zext <8 x i16> %wide.load to <8 x i32>
+  store <8 x i32> %0, <8 x i32>* %dest, align 4
+  ret void
+}
+
+
+define void @foo_int32_int8_both(<16 x i32>* %dest, <16 x i8>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_int32_int8_both:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrb.u8 q0, [r1]
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmov.u16 r1, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    vmovlb.u16 q2, q2
+; CHECK-NEXT:    vstrw.32 q2, [r0, #48]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmovlb.u16 q1, q2
+; CHECK-NEXT:    vstrw.32 q1, [r0, #32]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vmovlb.s8 q0, q1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmovlb.u16 q0, q1
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %wide.load = load <16 x i8>, <16 x i8>* %src, align 1
+  %0 = sext <16 x i8> %wide.load to <16 x i16>
+  %1 = zext <16 x i16> %0 to <16 x i32>
+  store <16 x i32> %1, <16 x i32>* %dest, align 4
+  ret void
+}
+
+define <8 x i16>* @foo_uint32_uint16_double_offset(<8 x i32>* %dest, <8 x i16>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_uint32_uint16_double_offset:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u16 q0, [r1, #16]!
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #16]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmovlb.s16 q0, q1
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds <8 x i16>, <8 x i16>* %src, i32 1
+  %wide.load = load <8 x i16>, <8 x i16>* %z, align 2
+  %0 = sext <8 x i16> %wide.load to <8 x i32>
+  store <8 x i32> %0, <8 x i32>* %dest, align 4
+  ret <8 x i16>* %z
+}
+
+define <16 x i16>* @foo_uint32_uint16_quad_offset(<16 x i32>* %dest, <16 x i16>* readonly %src, i32 %n) {
+; CHECK-LABEL: foo_uint32_uint16_quad_offset:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrh.u16 q1, [r1, #32]!
+; CHECK-NEXT:    vmov.u16 r2, q1[4]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[5]
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[6]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[0]
+; CHECK-NEXT:    vmovlb.s16 q2, q0
+; CHECK-NEXT:    vldrh.u16 q0, [r1, #16]
+; CHECK-NEXT:    vstrw.32 q2, [r0, #16]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    vmovlb.s16 q1, q2
+; CHECK-NEXT:    vstrw.32 q1, [r0]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vstrw.32 q1, [r0, #48]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmovlb.s16 q0, q1
+; CHECK-NEXT:    vstrw.32 q0, [r0, #32]
+; CHECK-NEXT:    mov r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = getelementptr inbounds <16 x i16>, <16 x i16>* %src, i32 1
+  %wide.load = load <16 x i16>, <16 x i16>* %z, align 2
+  %0 = sext <16 x i16> %wide.load to <16 x i32>
+  store <16 x i32> %0, <16 x i32>* %dest, align 4
+  ret <16 x i16>* %z
+}
 
 
 define void @foo_int16_int32_align1(<4 x i16>* %dest, <4 x i32>* readonly %src, i32 %n) {



