[llvm] 40b72c9 - [ARM] Extra MLA reductions tests. NFC
From: David Green via llvm-commits <llvm-commits@lists.llvm.org>
Date: Fri Sep 11 09:51:29 PDT 2020
Author: David Green
Date: 2020-09-11T17:51:15+01:00
New Revision: 40b72c9c792057f71319cfde3d7c7904dd8df6bc
URL: https://github.com/llvm/llvm-project/commit/40b72c9c792057f71319cfde3d7c7904dd8df6bc
DIFF: https://github.com/llvm/llvm-project/commit/40b72c9c792057f71319cfde3d7c7904dd8df6bc.diff
LOG: [ARM] Extra MLA reductions tests. NFC
Added:
Modified:
llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
index 93e3b16590b3..4010e3c91112 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
@@ -170,6 +170,279 @@ entry:
ret i64 %z
}
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_zext(<8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u16 r0, q1[0]
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[1]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmullb.u16 q3, q3, q2
+; CHECK-NEXT: vmov.i64 q2, #0xffffffff
+; CHECK-NEXT: vmov.f32 s16, s12
+; CHECK-NEXT: vmov.f32 s18, s13
+; CHECK-NEXT: vand q4, q4, q2
+; CHECK-NEXT: vmov r2, s18
+; CHECK-NEXT: vmov r3, s16
+; CHECK-NEXT: vmov r0, s19
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov.f32 s16, s14
+; CHECK-NEXT: vmov.f32 s18, s15
+; CHECK-NEXT: vand q3, q4, q2
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: adcs r0, r1
+; CHECK-NEXT: vmov r1, s13
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: vmov r3, s15
+; CHECK-NEXT: adcs r1, r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: vmov.u16 r2, q1[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vmullb.u16 q0, q1, q3
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov r2, s7
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: vand q0, q1, q2
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov r2, s3
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <8 x i16> %x to <8 x i32>
+ %yy = zext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = zext <8 x i32> %m to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_sext(<8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u16 r0, q1[0]
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[1]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmullb.s16 q2, q3, q2
+; CHECK-NEXT: vmov.f32 s12, s8
+; CHECK-NEXT: vmov.f32 s14, s9
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: vmov.32 q4[0], r0
+; CHECK-NEXT: asrs r0, r0, #31
+; CHECK-NEXT: vmov.32 q4[1], r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vmov.f32 s12, s10
+; CHECK-NEXT: vmov.f32 s14, s11
+; CHECK-NEXT: asrs r1, r0, #31
+; CHECK-NEXT: vmov.32 q4[3], r1
+; CHECK-NEXT: vmov r2, s18
+; CHECK-NEXT: vmov r3, s16
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r12, r1, r0, asr #31
+; CHECK-NEXT: vmov r1, s12
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: asrs r1, r1, #31
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov r1, s14
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: asrs r3, r1, #31
+; CHECK-NEXT: vmov.32 q2[3], r3
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov r3, s9
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adc.w r2, r12, r3
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: adds.w r12, r0, r3
+; CHECK-NEXT: adc.w r1, r2, r1, asr #31
+; CHECK-NEXT: vmov.u16 r2, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmullb.s16 q0, q1, q2
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q2[3], r3
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov r3, s9
+; CHECK-NEXT: adds.w r0, r0, r12
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adc.w r1, r1, r2, asr #31
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adc.w r1, r1, r2, asr #31
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adc.w r1, r1, r2, asr #31
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %yy = sext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = sext <8 x i32> %m to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_sextzext(<8 x i16> %x, <8 x i16> %y) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_sextzext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmov.32 q1[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q1[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q1[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q1[3], r0
+; CHECK-NEXT: vmullb.s16 q2, q1, q1
+; CHECK-NEXT: vmov.i64 q1, #0xffffffff
+; CHECK-NEXT: vmov.f32 s12, s8
+; CHECK-NEXT: vmov.f32 s14, s9
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vmov r2, s14
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r0, s15
+; CHECK-NEXT: vmov r1, s13
+; CHECK-NEXT: vmov.f32 s12, s10
+; CHECK-NEXT: vmov.f32 s14, s11
+; CHECK-NEXT: vand q2, q3, q1
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: adcs r0, r1
+; CHECK-NEXT: vmov r1, s9
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: adcs r0, r1
+; CHECK-NEXT: vmov r1, s11
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adcs r0, r1
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmullb.s16 q0, q2, q2
+; CHECK-NEXT: vmov.f32 s8, s0
+; CHECK-NEXT: vmov.f32 s10, s1
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r1, s9
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: adcs r0, r1
+; CHECK-NEXT: vmov r1, s11
+; CHECK-NEXT: vmov.f32 s8, s2
+; CHECK-NEXT: vmov.f32 s10, s3
+; CHECK-NEXT: vand q0, q2, q1
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: adcs r0, r1
+; CHECK-NEXT: vmov r1, s1
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: vmov r3, s3
+; CHECK-NEXT: adcs r1, r0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %m = mul <8 x i32> %xx, %xx
+ %ma = zext <8 x i32> %m to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ ret i64 %z
+}
+
define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_zext(<2 x i16> %x, <2 x i16> %y) {
; CHECK-LABEL: add_v2i16_v2i64_zext:
; CHECK: @ %bb.0: @ %entry
@@ -239,6 +512,336 @@ entry:
ret i32 %z
}
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_zext(<16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u8 r0, q1[8]
+; CHECK-NEXT: vmov.16 q2[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[9]
+; CHECK-NEXT: vmov.16 q2[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[10]
+; CHECK-NEXT: vmov.16 q2[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[11]
+; CHECK-NEXT: vmov.16 q2[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[12]
+; CHECK-NEXT: vmov.16 q2[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[13]
+; CHECK-NEXT: vmov.16 q2[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[14]
+; CHECK-NEXT: vmov.16 q2[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[15]
+; CHECK-NEXT: vmov.16 q2[7], r0
+; CHECK-NEXT: vmov.u8 r0, q0[8]
+; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.u8 r0, q0[9]
+; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.u8 r0, q0[10]
+; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.u8 r0, q0[11]
+; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[12]
+; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.u8 r0, q0[13]
+; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.u8 r0, q0[14]
+; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.u8 r0, q0[15]
+; CHECK-NEXT: vmov.16 q3[7], r0
+; CHECK-NEXT: vmullb.u8 q2, q3, q2
+; CHECK-NEXT: vmov.u16 r0, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[0]
+; CHECK-NEXT: vmov.16 q4[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[1]
+; CHECK-NEXT: vmov.16 q4[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[2]
+; CHECK-NEXT: vmov.16 q4[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[3]
+; CHECK-NEXT: vmov.16 q4[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[4]
+; CHECK-NEXT: vmov.16 q4[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[5]
+; CHECK-NEXT: vmov.16 q4[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[6]
+; CHECK-NEXT: vmov.16 q4[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[7]
+; CHECK-NEXT: vmov.16 q4[7], r0
+; CHECK-NEXT: vmov.u8 r0, q0[0]
+; CHECK-NEXT: vmov.16 q1[0], r0
+; CHECK-NEXT: vmov.u8 r0, q0[1]
+; CHECK-NEXT: vmov.16 q1[1], r0
+; CHECK-NEXT: vmov.u8 r0, q0[2]
+; CHECK-NEXT: vmov.16 q1[2], r0
+; CHECK-NEXT: vmov.u8 r0, q0[3]
+; CHECK-NEXT: vmov.16 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[4]
+; CHECK-NEXT: vmov.16 q1[4], r0
+; CHECK-NEXT: vmov.u8 r0, q0[5]
+; CHECK-NEXT: vmov.16 q1[5], r0
+; CHECK-NEXT: vmov.u8 r0, q0[6]
+; CHECK-NEXT: vmov.16 q1[6], r0
+; CHECK-NEXT: vmov.u8 r0, q0[7]
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmovlb.u16 q3, q3
+; CHECK-NEXT: vmullb.u8 q0, q1, q4
+; CHECK-NEXT: vmov.u16 r0, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r0
+; CHECK-NEXT: vmov.u16 r0, q2[0]
+; CHECK-NEXT: vmovlb.u16 q1, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q3
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q2[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q2[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q2[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmovlb.u16 q2, q3
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmovlb.u16 q0, q3
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vaddv.u32 r0, q0
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <16 x i8> %x to <16 x i16>
+ %yy = zext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_sext(<16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u8 r0, q1[8]
+; CHECK-NEXT: vmov.16 q2[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[9]
+; CHECK-NEXT: vmov.16 q2[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[10]
+; CHECK-NEXT: vmov.16 q2[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[11]
+; CHECK-NEXT: vmov.16 q2[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[12]
+; CHECK-NEXT: vmov.16 q2[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[13]
+; CHECK-NEXT: vmov.16 q2[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[14]
+; CHECK-NEXT: vmov.16 q2[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[15]
+; CHECK-NEXT: vmov.16 q2[7], r0
+; CHECK-NEXT: vmov.u8 r0, q0[8]
+; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.u8 r0, q0[9]
+; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.u8 r0, q0[10]
+; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.u8 r0, q0[11]
+; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[12]
+; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.u8 r0, q0[13]
+; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.u8 r0, q0[14]
+; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.u8 r0, q0[15]
+; CHECK-NEXT: vmov.16 q3[7], r0
+; CHECK-NEXT: vmullb.s8 q2, q3, q2
+; CHECK-NEXT: vmov.u16 r0, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[0]
+; CHECK-NEXT: vmov.16 q4[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[1]
+; CHECK-NEXT: vmov.16 q4[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[2]
+; CHECK-NEXT: vmov.16 q4[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[3]
+; CHECK-NEXT: vmov.16 q4[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[4]
+; CHECK-NEXT: vmov.16 q4[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[5]
+; CHECK-NEXT: vmov.16 q4[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[6]
+; CHECK-NEXT: vmov.16 q4[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[7]
+; CHECK-NEXT: vmov.16 q4[7], r0
+; CHECK-NEXT: vmov.u8 r0, q0[0]
+; CHECK-NEXT: vmov.16 q1[0], r0
+; CHECK-NEXT: vmov.u8 r0, q0[1]
+; CHECK-NEXT: vmov.16 q1[1], r0
+; CHECK-NEXT: vmov.u8 r0, q0[2]
+; CHECK-NEXT: vmov.16 q1[2], r0
+; CHECK-NEXT: vmov.u8 r0, q0[3]
+; CHECK-NEXT: vmov.16 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[4]
+; CHECK-NEXT: vmov.16 q1[4], r0
+; CHECK-NEXT: vmov.u8 r0, q0[5]
+; CHECK-NEXT: vmov.16 q1[5], r0
+; CHECK-NEXT: vmov.u8 r0, q0[6]
+; CHECK-NEXT: vmov.16 q1[6], r0
+; CHECK-NEXT: vmov.u8 r0, q0[7]
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmovlb.s16 q3, q3
+; CHECK-NEXT: vmullb.s8 q0, q1, q4
+; CHECK-NEXT: vmov.u16 r0, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r0
+; CHECK-NEXT: vmov.u16 r0, q2[0]
+; CHECK-NEXT: vmovlb.s16 q1, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q3
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q2[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q2[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q2[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmovlb.s16 q2, q3
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmovlb.s16 q0, q3
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vaddv.u32 r0, q0
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %yy = sext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = sext <16 x i16> %m to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_sextzext(<16 x i8> %x, <16 x i8> %y) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_sextzext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u8 r0, q0[8]
+; CHECK-NEXT: vmov.16 q1[0], r0
+; CHECK-NEXT: vmov.u8 r0, q0[9]
+; CHECK-NEXT: vmov.16 q1[1], r0
+; CHECK-NEXT: vmov.u8 r0, q0[10]
+; CHECK-NEXT: vmov.16 q1[2], r0
+; CHECK-NEXT: vmov.u8 r0, q0[11]
+; CHECK-NEXT: vmov.16 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[12]
+; CHECK-NEXT: vmov.16 q1[4], r0
+; CHECK-NEXT: vmov.u8 r0, q0[13]
+; CHECK-NEXT: vmov.16 q1[5], r0
+; CHECK-NEXT: vmov.u8 r0, q0[14]
+; CHECK-NEXT: vmov.16 q1[6], r0
+; CHECK-NEXT: vmov.u8 r0, q0[15]
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmullb.s8 q1, q1, q1
+; CHECK-NEXT: vmov.u16 r0, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[0]
+; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.u8 r0, q0[1]
+; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.u8 r0, q0[2]
+; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.u8 r0, q0[3]
+; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[4]
+; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.u8 r0, q0[5]
+; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.u8 r0, q0[6]
+; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.u8 r0, q0[7]
+; CHECK-NEXT: vmov.16 q3[7], r0
+; CHECK-NEXT: vmovlb.u16 q2, q2
+; CHECK-NEXT: vmullb.s8 q0, q3, q3
+; CHECK-NEXT: vmov.u16 r0, q0[4]
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[5]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[6]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[7]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u16 r0, q1[0]
+; CHECK-NEXT: vmovlb.u16 q3, q3
+; CHECK-NEXT: vadd.i32 q2, q3, q2
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmovlb.u16 q1, q3
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmovlb.u16 q0, q3
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vaddv.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %m = mul <16 x i16> %xx, %xx
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ ret i32 %z
+}
+
define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_zext(<4 x i8> %x, <4 x i8> %y) {
; CHECK-LABEL: add_v4i8_v4i32_zext:
; CHECK: @ %bb.0: @ %entry
@@ -990,6 +1593,308 @@ entry:
ret i64 %r
}
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_acc_zext(<8 x i16> %x, <8 x i16> %y, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u16 r2, q1[0]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[1]
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[2]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[3]
+; CHECK-NEXT: vmov.32 q2[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[0]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vmullb.u16 q3, q3, q2
+; CHECK-NEXT: vmov.i64 q2, #0xffffffff
+; CHECK-NEXT: vmov.f32 s16, s12
+; CHECK-NEXT: vmov.f32 s18, s13
+; CHECK-NEXT: vand q4, q4, q2
+; CHECK-NEXT: vmov r2, s18
+; CHECK-NEXT: vmov r3, s16
+; CHECK-NEXT: vmov r12, s19
+; CHECK-NEXT: vmov lr, s17
+; CHECK-NEXT: vmov.f32 s16, s14
+; CHECK-NEXT: vmov.f32 s18, s15
+; CHECK-NEXT: vand q3, q4, q2
+; CHECK-NEXT: adds r4, r3, r2
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r2, s13
+; CHECK-NEXT: adc.w r12, r12, lr
+; CHECK-NEXT: adds.w lr, r4, r3
+; CHECK-NEXT: vmov r3, s14
+; CHECK-NEXT: adc.w r4, r12, r2
+; CHECK-NEXT: vmov r2, s15
+; CHECK-NEXT: adds.w r12, lr, r3
+; CHECK-NEXT: adc.w r3, r4, r2
+; CHECK-NEXT: vmov.u16 r2, q1[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmullb.u16 q0, q1, q3
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds.w r4, r4, r12
+; CHECK-NEXT: adc.w r12, r3, r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov r3, s7
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: vand q0, q1, q2
+; CHECK-NEXT: adds.w lr, r4, r2
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov r4, s1
+; CHECK-NEXT: adc.w r3, r3, r12
+; CHECK-NEXT: adds.w r12, lr, r2
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: adds.w r2, r2, r12
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, pc}
+entry:
+ %xx = zext <8 x i16> %x to <8 x i32>
+ %yy = zext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = zext <8 x i32> %m to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_acc_sext(<8 x i16> %x, <8 x i16> %y, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u16 r2, q1[0]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[1]
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[2]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[3]
+; CHECK-NEXT: vmov.32 q2[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[0]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vmullb.s16 q2, q3, q2
+; CHECK-NEXT: vmov.f32 s12, s8
+; CHECK-NEXT: vmov.f32 s14, s9
+; CHECK-NEXT: vmov r2, s12
+; CHECK-NEXT: vmov.32 q4[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q4[1], r2
+; CHECK-NEXT: vmov r2, s14
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vmov.f32 s12, s10
+; CHECK-NEXT: vmov.f32 s14, s11
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q4[3], r3
+; CHECK-NEXT: vmov lr, s18
+; CHECK-NEXT: vmov r3, s16
+; CHECK-NEXT: vmov r12, s17
+; CHECK-NEXT: adds.w lr, lr, r3
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov r2, s12
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov r2, s14
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q2[3], r3
+; CHECK-NEXT: vmov r4, s8
+; CHECK-NEXT: vmov r3, s9
+; CHECK-NEXT: adds.w r4, r4, lr
+; CHECK-NEXT: adc.w lr, r12, r3
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: adds.w r12, r4, r3
+; CHECK-NEXT: adc.w lr, lr, r2, asr #31
+; CHECK-NEXT: vmov.u16 r2, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmullb.s16 q0, q1, q2
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: asrs r4, r2, #31
+; CHECK-NEXT: vmov.32 q2[3], r4
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r4, s9
+; CHECK-NEXT: adds.w r12, r12, r3
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: adc.w r4, r4, lr
+; CHECK-NEXT: adds.w r3, r3, r12
+; CHECK-NEXT: adc.w r2, r4, r2, asr #31
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: adc.w r2, r2, r4, asr #31
+; CHECK-NEXT: vmov r4, s6
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: adc.w r2, r2, r4, asr #31
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: pop {r4, pc}
+entry:
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %yy = sext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = sext <8 x i32> %m to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_acc_sextzext(<8 x i16> %x, <8 x i16> %y, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_acc_sextzext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vmov.u16 r2, q0[0]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[1]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[2]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[3]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmullb.s16 q1, q1, q1
+; CHECK-NEXT: vmov.f32 s8, s4
+; CHECK-NEXT: vmov.f32 s10, s5
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.f32 s8, s6
+; CHECK-NEXT: vmov.f32 s10, s7
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q3[3], r3
+; CHECK-NEXT: vmov lr, s14
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r12, s13
+; CHECK-NEXT: adds.w lr, lr, r3
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q1[3], r3
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: vmov r3, s5
+; CHECK-NEXT: adds.w r4, r4, lr
+; CHECK-NEXT: adc.w lr, r12, r3
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: adds.w r12, r4, r3
+; CHECK-NEXT: adc.w lr, lr, r2, asr #31
+; CHECK-NEXT: vmov.u16 r2, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmullb.s16 q0, q1, q1
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: asrs r4, r2, #31
+; CHECK-NEXT: vmov.32 q2[3], r4
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r4, s9
+; CHECK-NEXT: adds.w r12, r12, r3
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: adc.w r4, r4, lr
+; CHECK-NEXT: adds.w r3, r3, r12
+; CHECK-NEXT: adc.w r2, r4, r2, asr #31
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: adc.w r2, r2, r4, asr #31
+; CHECK-NEXT: vmov r4, s6
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: adc.w r2, r2, r4, asr #31
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: pop {r4, pc}
+entry:
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %m = mul <8 x i32> %xx, %xx
+ %ma = sext <8 x i32> %m to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_zext(<2 x i16> %x, <2 x i16> %y, i64 %a) {
; CHECK-LABEL: add_v2i16_v2i64_acc_zext:
; CHECK: @ %bb.0: @ %entry
@@ -1071,6 +1976,339 @@ entry:
ret i32 %r
}
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_acc_zext(<16 x i8> %x, <16 x i8> %y, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u8 r1, q1[8]
+; CHECK-NEXT: vmov.16 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[9]
+; CHECK-NEXT: vmov.16 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[10]
+; CHECK-NEXT: vmov.16 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[11]
+; CHECK-NEXT: vmov.16 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[12]
+; CHECK-NEXT: vmov.16 q2[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[13]
+; CHECK-NEXT: vmov.16 q2[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[14]
+; CHECK-NEXT: vmov.16 q2[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[15]
+; CHECK-NEXT: vmov.16 q2[7], r1
+; CHECK-NEXT: vmov.u8 r1, q0[8]
+; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[9]
+; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[10]
+; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[11]
+; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[12]
+; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[13]
+; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[14]
+; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[15]
+; CHECK-NEXT: vmov.16 q3[7], r1
+; CHECK-NEXT: vmullb.u8 q2, q3, q2
+; CHECK-NEXT: vmov.u16 r1, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[0]
+; CHECK-NEXT: vmov.16 q4[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[1]
+; CHECK-NEXT: vmov.16 q4[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[2]
+; CHECK-NEXT: vmov.16 q4[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[3]
+; CHECK-NEXT: vmov.16 q4[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[4]
+; CHECK-NEXT: vmov.16 q4[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[5]
+; CHECK-NEXT: vmov.16 q4[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[6]
+; CHECK-NEXT: vmov.16 q4[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[7]
+; CHECK-NEXT: vmov.16 q4[7], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmovlb.u16 q3, q3
+; CHECK-NEXT: vmullb.u8 q0, q1, q4
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u16 r1, q2[0]
+; CHECK-NEXT: vmovlb.u16 q1, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q3
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q2[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q2[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q2[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmovlb.u16 q2, q3
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmovlb.u16 q0, q3
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <16 x i8> %x to <16 x i16>
+ %yy = zext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_acc_sext(<16 x i8> %x, <16 x i8> %y, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u8 r1, q1[8]
+; CHECK-NEXT: vmov.16 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[9]
+; CHECK-NEXT: vmov.16 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[10]
+; CHECK-NEXT: vmov.16 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[11]
+; CHECK-NEXT: vmov.16 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[12]
+; CHECK-NEXT: vmov.16 q2[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[13]
+; CHECK-NEXT: vmov.16 q2[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[14]
+; CHECK-NEXT: vmov.16 q2[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[15]
+; CHECK-NEXT: vmov.16 q2[7], r1
+; CHECK-NEXT: vmov.u8 r1, q0[8]
+; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[9]
+; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[10]
+; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[11]
+; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[12]
+; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[13]
+; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[14]
+; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[15]
+; CHECK-NEXT: vmov.16 q3[7], r1
+; CHECK-NEXT: vmullb.s8 q2, q3, q2
+; CHECK-NEXT: vmov.u16 r1, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[0]
+; CHECK-NEXT: vmov.16 q4[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[1]
+; CHECK-NEXT: vmov.16 q4[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[2]
+; CHECK-NEXT: vmov.16 q4[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[3]
+; CHECK-NEXT: vmov.16 q4[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[4]
+; CHECK-NEXT: vmov.16 q4[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[5]
+; CHECK-NEXT: vmov.16 q4[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[6]
+; CHECK-NEXT: vmov.16 q4[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[7]
+; CHECK-NEXT: vmov.16 q4[7], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmovlb.s16 q3, q3
+; CHECK-NEXT: vmullb.s8 q0, q1, q4
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u16 r1, q2[0]
+; CHECK-NEXT: vmovlb.s16 q1, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q3
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q2[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q2[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q2[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmovlb.s16 q2, q3
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmovlb.s16 q0, q3
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %yy = sext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = sext <16 x i16> %m to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_acc_sextzext(<16 x i8> %x, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_acc_sextzext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u8 r1, q0[8]
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[9]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[10]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[11]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[12]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[13]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[14]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[15]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmullb.s8 q1, q1, q1
+; CHECK-NEXT: vmov.u16 r1, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.16 q3[7], r1
+; CHECK-NEXT: vmovlb.u16 q2, q2
+; CHECK-NEXT: vmullb.s8 q0, q3, q3
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u16 r1, q1[0]
+; CHECK-NEXT: vmovlb.u16 q3, q3
+; CHECK-NEXT: vadd.i32 q2, q3, q2
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmovlb.u16 q1, q3
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmovlb.u16 q0, q3
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %m = mul <16 x i16> %xx, %xx
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_zext(<4 x i8> %x, <4 x i8> %y, i32 %a) {
; CHECK-LABEL: add_v4i8_v4i32_acc_zext:
; CHECK: @ %bb.0: @ %entry
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
index f30856d32b11..bc316c3c2478 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
@@ -236,6 +236,483 @@ entry:
ret i64 %z
}
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_zext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vmov.i8 q3, #0x0
+; CHECK-NEXT: vmov.i8 q4, #0xff
+; CHECK-NEXT: vcmp.i16 eq, q2, zr
+; CHECK-NEXT: vpsel q3, q4, q3
+; CHECK-NEXT: vmov.u16 r0, q3[0]
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q3[1]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q3[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q3[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmrs r12, p0
+; CHECK-NEXT: and r1, r12, #1
+; CHECK-NEXT: rsbs r1, r1, #0
+; CHECK-NEXT: vmov.32 q4[0], r1
+; CHECK-NEXT: vmov.32 q4[1], r1
+; CHECK-NEXT: ubfx r1, r12, #4, #1
+; CHECK-NEXT: rsbs r1, r1, #0
+; CHECK-NEXT: vmov.32 q4[2], r1
+; CHECK-NEXT: vmov.32 q4[3], r1
+; CHECK-NEXT: vmov.u16 r1, q1[0]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[1]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmov.32 q5[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q5[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q5[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q5[3], r1
+; CHECK-NEXT: vmullb.u16 q5, q5, q2
+; CHECK-NEXT: vmov.i64 q2, #0xffffffff
+; CHECK-NEXT: vmov.f32 s24, s20
+; CHECK-NEXT: vmov.f32 s26, s21
+; CHECK-NEXT: vand q6, q6, q2
+; CHECK-NEXT: vand q4, q6, q4
+; CHECK-NEXT: vmov.f32 s24, s22
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: vmov r1, s19
+; CHECK-NEXT: vmov r2, s17
+; CHECK-NEXT: vmov.f32 s26, s23
+; CHECK-NEXT: vand q5, q6, q2
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: ubfx r2, r12, #8, #1
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: vmov.32 q4[0], r2
+; CHECK-NEXT: vmov.32 q4[1], r2
+; CHECK-NEXT: ubfx r2, r12, #12, #1
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vmov.32 q4[3], r2
+; CHECK-NEXT: vand q4, q5, q4
+; CHECK-NEXT: vmov r3, s16
+; CHECK-NEXT: vmov r2, s17
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov r2, s19
+; CHECK-NEXT: adds.w r12, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov.u16 r2, q3[4]
+; CHECK-NEXT: vmov.32 q4[0], r2
+; CHECK-NEXT: vmov.u16 r2, q3[5]
+; CHECK-NEXT: vmov.32 q4[1], r2
+; CHECK-NEXT: vmov.u16 r2, q3[6]
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vmov.u16 r2, q3[7]
+; CHECK-NEXT: vmov.32 q4[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q4, zr
+; CHECK-NEXT: vmrs lr, p0
+; CHECK-NEXT: and r3, lr, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: vmov.32 q3[1], r3
+; CHECK-NEXT: ubfx r3, lr, #4, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: vmov.32 q3[3], r3
+; CHECK-NEXT: vmov.u16 r3, q1[4]
+; CHECK-NEXT: vmov.32 q4[0], r3
+; CHECK-NEXT: vmov.u16 r3, q1[5]
+; CHECK-NEXT: vmov.32 q4[1], r3
+; CHECK-NEXT: vmov.u16 r3, q1[6]
+; CHECK-NEXT: vmov.32 q4[2], r3
+; CHECK-NEXT: vmov.u16 r3, q1[7]
+; CHECK-NEXT: vmov.32 q4[3], r3
+; CHECK-NEXT: vmov.u16 r3, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r3
+; CHECK-NEXT: vmov.u16 r3, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r3
+; CHECK-NEXT: vmov.u16 r3, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r3
+; CHECK-NEXT: vmov.u16 r3, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r3
+; CHECK-NEXT: vmullb.u16 q0, q1, q4
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vand q1, q1, q3
+; CHECK-NEXT: vmov.f32 s12, s2
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r3, s5
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vand q0, q3, q2
+; CHECK-NEXT: adds.w r0, r0, r12
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vmov r3, s7
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: ubfx r2, lr, #8, #1
+; CHECK-NEXT: rsb.w r2, r2, #0
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: ubfx r2, lr, #12, #1
+; CHECK-NEXT: rsb.w r2, r2, #0
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov r2, s3
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: pop {r7, pc}
+entry:
+ %c = icmp eq <8 x i16> %b, zeroinitializer
+ %xx = zext <8 x i16> %x to <8 x i32>
+ %yy = zext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = zext <8 x i32> %m to <8 x i64>
+ %s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vmov.u16 r0, q1[0]
+; CHECK-NEXT: vmov.i8 q6, #0xff
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmov.32 q4[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q4[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q4[3], r0
+; CHECK-NEXT: vcmp.i16 eq, q2, zr
+; CHECK-NEXT: vmullb.s16 q3, q4, q3
+; CHECK-NEXT: vmov.f32 s20, s12
+; CHECK-NEXT: vmov.f32 s22, s13
+; CHECK-NEXT: vmov r0, s20
+; CHECK-NEXT: vmov.32 q4[0], r0
+; CHECK-NEXT: asrs r0, r0, #31
+; CHECK-NEXT: vmov.32 q4[1], r0
+; CHECK-NEXT: vmov r0, s22
+; CHECK-NEXT: vmov.i8 q5, #0x0
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vpsel q2, q6, q5
+; CHECK-NEXT: asrs r0, r0, #31
+; CHECK-NEXT: vmov.32 q4[3], r0
+; CHECK-NEXT: vmov.u16 r0, q2[0]
+; CHECK-NEXT: vmov.32 q5[0], r0
+; CHECK-NEXT: vmov.u16 r0, q2[1]
+; CHECK-NEXT: vmov.32 q5[1], r0
+; CHECK-NEXT: vmov.u16 r0, q2[2]
+; CHECK-NEXT: vmov.32 q5[2], r0
+; CHECK-NEXT: vmov.u16 r0, q2[3]
+; CHECK-NEXT: vmov.32 q5[3], r0
+; CHECK-NEXT: vcmp.i32 ne, q5, zr
+; CHECK-NEXT: vmrs r0, p0
+; CHECK-NEXT: and r1, r0, #1
+; CHECK-NEXT: rsbs r1, r1, #0
+; CHECK-NEXT: vmov.32 q5[0], r1
+; CHECK-NEXT: vmov.32 q5[1], r1
+; CHECK-NEXT: ubfx r1, r0, #4, #1
+; CHECK-NEXT: rsbs r1, r1, #0
+; CHECK-NEXT: vmov.32 q5[2], r1
+; CHECK-NEXT: vmov.32 q5[3], r1
+; CHECK-NEXT: vand q4, q4, q5
+; CHECK-NEXT: vmov r1, s18
+; CHECK-NEXT: vmov r2, s16
+; CHECK-NEXT: vmov r12, s19
+; CHECK-NEXT: vmov r3, s17
+; CHECK-NEXT: vmov.f32 s16, s14
+; CHECK-NEXT: vmov.f32 s18, s15
+; CHECK-NEXT: adds r1, r1, r2
+; CHECK-NEXT: adc.w r2, r3, r12
+; CHECK-NEXT: vmov r3, s16
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q3[1], r3
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q3[3], r3
+; CHECK-NEXT: ubfx r3, r0, #8, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: ubfx r0, r0, #12, #1
+; CHECK-NEXT: vmov.32 q4[0], r3
+; CHECK-NEXT: rsbs r0, r0, #0
+; CHECK-NEXT: vmov.32 q4[1], r3
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vmov.32 q4[3], r0
+; CHECK-NEXT: vand q3, q3, q4
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r0, s13
+; CHECK-NEXT: adds r1, r1, r3
+; CHECK-NEXT: vmov r3, s15
+; CHECK-NEXT: adcs r2, r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: adds.w r12, r1, r0
+; CHECK-NEXT: adc.w r1, r2, r3
+; CHECK-NEXT: vmov.u16 r2, q1[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmullb.s16 q0, q1, q3
+; CHECK-NEXT: vmov.f32 s12, s0
+; CHECK-NEXT: vmov.f32 s14, s1
+; CHECK-NEXT: vmov r2, s12
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov r2, s14
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov.u16 r2, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q3, zr
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q2[0], r3
+; CHECK-NEXT: vmov.32 q2[1], r3
+; CHECK-NEXT: ubfx r3, r2, #4, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q2[2], r3
+; CHECK-NEXT: vmov.32 q2[3], r3
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov r3, s5
+; CHECK-NEXT: adds.w r12, r12, r0
+; CHECK-NEXT: vmov r0, s6
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vmov r3, s7
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: adds.w r0, r0, r12
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov.32 q0[0], r3
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q0[1], r3
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: vmov.32 q0[2], r3
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q0[3], r3
+; CHECK-NEXT: ubfx r3, r2, #8, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: vmov.32 q1[0], r3
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: vmov.32 q1[1], r3
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov r2, s3
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %b, zeroinitializer
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %yy = sext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = sext <8 x i32> %m to <8 x i64>
+ %s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_sextzext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_sextzext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vmov.i8 q1, #0x0
+; CHECK-NEXT: vmov.i8 q3, #0xff
+; CHECK-NEXT: vcmp.i16 eq, q2, zr
+; CHECK-NEXT: vpsel q2, q3, q1
+; CHECK-NEXT: vmov.u16 r0, q2[0]
+; CHECK-NEXT: vmov.32 q1[0], r0
+; CHECK-NEXT: vmov.u16 r0, q2[1]
+; CHECK-NEXT: vmov.32 q1[1], r0
+; CHECK-NEXT: vmov.u16 r0, q2[2]
+; CHECK-NEXT: vmov.32 q1[2], r0
+; CHECK-NEXT: vmov.u16 r0, q2[3]
+; CHECK-NEXT: vmov.32 q1[3], r0
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmrs r0, p0
+; CHECK-NEXT: and r1, r0, #1
+; CHECK-NEXT: rsbs r1, r1, #0
+; CHECK-NEXT: vmov.32 q4[0], r1
+; CHECK-NEXT: vmov.32 q4[1], r1
+; CHECK-NEXT: ubfx r1, r0, #4, #1
+; CHECK-NEXT: rsbs r1, r1, #0
+; CHECK-NEXT: vmov.32 q4[2], r1
+; CHECK-NEXT: vmov.32 q4[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmullb.s16 q3, q1, q1
+; CHECK-NEXT: vmov.i64 q1, #0xffffffff
+; CHECK-NEXT: vmov.f32 s20, s12
+; CHECK-NEXT: vmov.f32 s22, s13
+; CHECK-NEXT: vand q5, q5, q1
+; CHECK-NEXT: vand q4, q5, q4
+; CHECK-NEXT: vmov.f32 s20, s14
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: vmov r1, s16
+; CHECK-NEXT: vmov r12, s19
+; CHECK-NEXT: vmov r2, s17
+; CHECK-NEXT: vmov.f32 s22, s15
+; CHECK-NEXT: vand q3, q5, q1
+; CHECK-NEXT: adds r1, r1, r3
+; CHECK-NEXT: ubfx r3, r0, #8, #1
+; CHECK-NEXT: rsb.w r3, r3, #0
+; CHECK-NEXT: ubfx r0, r0, #12, #1
+; CHECK-NEXT: vmov.32 q4[0], r3
+; CHECK-NEXT: rsb.w r0, r0, #0
+; CHECK-NEXT: vmov.32 q4[1], r3
+; CHECK-NEXT: adc.w r2, r2, r12
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vmov.32 q4[3], r0
+; CHECK-NEXT: vand q3, q3, q4
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r0, s13
+; CHECK-NEXT: adds r1, r1, r3
+; CHECK-NEXT: vmov r3, s15
+; CHECK-NEXT: adcs r2, r0
+; CHECK-NEXT: vmov r0, s14
+; CHECK-NEXT: adds.w r12, r1, r0
+; CHECK-NEXT: adc.w r1, r2, r3
+; CHECK-NEXT: vmov.u16 r2, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q3, zr
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q2[0], r3
+; CHECK-NEXT: vmov.32 q2[1], r3
+; CHECK-NEXT: ubfx r3, r2, #4, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q2[2], r3
+; CHECK-NEXT: vmov.32 q2[3], r3
+; CHECK-NEXT: vmov.u16 r3, q0[4]
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: vmov.u16 r3, q0[5]
+; CHECK-NEXT: vmov.32 q3[1], r3
+; CHECK-NEXT: vmov.u16 r3, q0[6]
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: vmov.u16 r3, q0[7]
+; CHECK-NEXT: vmov.32 q3[3], r3
+; CHECK-NEXT: vmullb.s16 q0, q3, q3
+; CHECK-NEXT: vmov.f32 s12, s0
+; CHECK-NEXT: vmov.f32 s14, s1
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vand q2, q3, q2
+; CHECK-NEXT: vmov.f32 s12, s2
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov r3, s9
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vand q0, q3, q1
+; CHECK-NEXT: adds.w r12, r12, r0
+; CHECK-NEXT: vmov r0, s10
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: vmov r3, s11
+; CHECK-NEXT: adds.w r0, r0, r12
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: ubfx r3, r2, #8, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: vmov.32 q2[0], r3
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: vmov.32 q2[1], r3
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.32 q2[3], r2
+; CHECK-NEXT: vand q0, q0, q2
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vmov r2, s3
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %b, zeroinitializer
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %m = mul <8 x i32> %xx, %xx
+ %ma = zext <8 x i32> %m to <8 x i64>
+ %s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ ret i64 %z
+}
+
define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_zext(<2 x i16> %x, <2 x i16> %y, <2 x i16> %b) {
; CHECK-LABEL: add_v2i16_v2i64_zext:
; CHECK: @ %bb.0: @ %entry
@@ -347,26 +824,641 @@ define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_zext(<16 x i8> %x, <16 x i8> %y, <1
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <16 x i8> %b, zeroinitializer
- %xx = zext <16 x i8> %x to <16 x i32>
- %yy = zext <16 x i8> %y to <16 x i32>
- %m = mul <16 x i32> %xx, %yy
- %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+ %xx = zext <16 x i8> %x to <16 x i32>
+ %yy = zext <16 x i8> %y to <16 x i32>
+ %m = mul <16 x i32> %xx, %yy
+ %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ ret i32 %z
+}
+
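+; Plain v16i8 -> v16i32 sign-extending multiply reduction, still selected to a predicated vmlavt.s8.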
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i32_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vpt.i8 eq, q2, zr
+; CHECK-NEXT: vmlavt.s8 r0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %b, zeroinitializer
+ %xx = sext <16 x i8> %x to <16 x i32>
+ %yy = sext <16 x i8> %y to <16 x i32>
+ %m = mul <16 x i32> %xx, %yy
+ %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ ret i32 %z
+}
+
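+; v16i8 inputs zero-extended to v16i16 for the multiply, with the i16 product zero-extended again to v16i32.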
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #64
+; CHECK-NEXT: sub sp, #64
+; CHECK-NEXT: vcmp.i8 eq, q2, zr
+; CHECK-NEXT: vmov.i8 q2, #0x0
+; CHECK-NEXT: vmov.i8 q7, #0xff
+; CHECK-NEXT: vmov q6, q1
+; CHECK-NEXT: vpsel q1, q7, q2
+; CHECK-NEXT: vstrw.32 q2, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.u8 r0, q1[0]
+; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[1]
+; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[2]
+; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[3]
+; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[4]
+; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[5]
+; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[6]
+; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[7]
+; CHECK-NEXT: vmov.16 q3[7], r0
+; CHECK-NEXT: vcmp.i16 ne, q3, zr
+; CHECK-NEXT: vpsel q3, q7, q2
+; CHECK-NEXT: vmov.u16 r0, q3[4]
+; CHECK-NEXT: vstrw.32 q3, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q3[5]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q3[6]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q3[7]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u8 r0, q6[0]
+; CHECK-NEXT: vmov.16 q4[0], r0
+; CHECK-NEXT: vmov.u8 r0, q6[1]
+; CHECK-NEXT: vmov.16 q4[1], r0
+; CHECK-NEXT: vmov.u8 r0, q6[2]
+; CHECK-NEXT: vmov.16 q4[2], r0
+; CHECK-NEXT: vmov.u8 r0, q6[3]
+; CHECK-NEXT: vmov.16 q4[3], r0
+; CHECK-NEXT: vmov.u8 r0, q6[4]
+; CHECK-NEXT: vmov.16 q4[4], r0
+; CHECK-NEXT: vmov.u8 r0, q6[5]
+; CHECK-NEXT: vmov.16 q4[5], r0
+; CHECK-NEXT: vmov.u8 r0, q6[6]
+; CHECK-NEXT: vmov.16 q4[6], r0
+; CHECK-NEXT: vmov.u8 r0, q6[7]
+; CHECK-NEXT: vmov.16 q4[7], r0
+; CHECK-NEXT: vmov.u8 r0, q0[0]
+; CHECK-NEXT: vmov.16 q5[0], r0
+; CHECK-NEXT: vmov.u8 r0, q0[1]
+; CHECK-NEXT: vmov.16 q5[1], r0
+; CHECK-NEXT: vmov.u8 r0, q0[2]
+; CHECK-NEXT: vmov.16 q5[2], r0
+; CHECK-NEXT: vmov.u8 r0, q0[3]
+; CHECK-NEXT: vmov.16 q5[3], r0
+; CHECK-NEXT: vmov.u8 r0, q0[4]
+; CHECK-NEXT: vmov.16 q5[4], r0
+; CHECK-NEXT: vmov.u8 r0, q0[5]
+; CHECK-NEXT: vmov.16 q5[5], r0
+; CHECK-NEXT: vmov.u8 r0, q0[6]
+; CHECK-NEXT: vmov.16 q5[6], r0
+; CHECK-NEXT: vmov.u8 r0, q0[7]
+; CHECK-NEXT: vmov.16 q5[7], r0
+; CHECK-NEXT: vmov q3, q0
+; CHECK-NEXT: vmullb.u8 q5, q5, q4
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmov.u16 r0, q5[4]
+; CHECK-NEXT: vmov.i32 q4, #0x0
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q5[5]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q5[6]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q5[7]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmov.i32 q2, #0xffff
+; CHECK-NEXT: vmov.u8 r0, q1[8]
+; CHECK-NEXT: vstrw.32 q4, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: vstrw.32 q2, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q4, q0, q2
+; CHECK-NEXT: vmov.16 q0[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[9]
+; CHECK-NEXT: vmov.16 q0[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[10]
+; CHECK-NEXT: vmov.16 q0[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[11]
+; CHECK-NEXT: vmov.16 q0[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[12]
+; CHECK-NEXT: vmov.16 q0[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[13]
+; CHECK-NEXT: vmov.16 q0[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[14]
+; CHECK-NEXT: vmov.16 q0[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[15]
+; CHECK-NEXT: vmov.16 q0[7], r0
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
+; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT: vpsel q0, q7, q0
+; CHECK-NEXT: vmov.u16 r0, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q6[8]
+; CHECK-NEXT: vmov.16 q7[0], r0
+; CHECK-NEXT: vmov.u8 r0, q6[9]
+; CHECK-NEXT: vmov.16 q7[1], r0
+; CHECK-NEXT: vmov.u8 r0, q6[10]
+; CHECK-NEXT: vmov.16 q7[2], r0
+; CHECK-NEXT: vmov.u8 r0, q6[11]
+; CHECK-NEXT: vmov.16 q7[3], r0
+; CHECK-NEXT: vmov.u8 r0, q6[12]
+; CHECK-NEXT: vmov.16 q7[4], r0
+; CHECK-NEXT: vmov.u8 r0, q6[13]
+; CHECK-NEXT: vmov.16 q7[5], r0
+; CHECK-NEXT: vmov.u8 r0, q6[14]
+; CHECK-NEXT: vmov.16 q7[6], r0
+; CHECK-NEXT: vmov.u8 r0, q6[15]
+; CHECK-NEXT: vmov.16 q7[7], r0
+; CHECK-NEXT: vmov.u8 r0, q3[8]
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmov.16 q1[0], r0
+; CHECK-NEXT: vmov.u8 r0, q3[9]
+; CHECK-NEXT: vmov.16 q1[1], r0
+; CHECK-NEXT: vmov.u8 r0, q3[10]
+; CHECK-NEXT: vmov.16 q1[2], r0
+; CHECK-NEXT: vmov.u8 r0, q3[11]
+; CHECK-NEXT: vmov.16 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q3[12]
+; CHECK-NEXT: vmov.16 q1[4], r0
+; CHECK-NEXT: vmov.u8 r0, q3[13]
+; CHECK-NEXT: vmov.16 q1[5], r0
+; CHECK-NEXT: vmov.u8 r0, q3[14]
+; CHECK-NEXT: vmov.16 q1[6], r0
+; CHECK-NEXT: vmov.u8 r0, q3[15]
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmullb.u8 q1, q1, q7
+; CHECK-NEXT: vmov.u16 r0, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmovlb.u16 q2, q2
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vaddt.i32 q4, q4, q2
+; CHECK-NEXT: vldrw.u32 q3, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: vmov.u16 r0, q3[0]
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q3[1]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q3[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q3[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q5[0]
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q5[1]
+; CHECK-NEXT: vldrw.u32 q3, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q5[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q5[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vldrw.u32 q5, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q3, q2, q5
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q1[0]
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[1]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[2]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[3]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmov q1, q3
+; CHECK-NEXT: vmovlb.u16 q0, q0
+; CHECK-NEXT: vpt.i32 ne, q2, zr
+; CHECK-NEXT: vaddt.i32 q1, q3, q0
+; CHECK-NEXT: vadd.i32 q0, q1, q4
+; CHECK-NEXT: vaddv.u32 r0, q0
+; CHECK-NEXT: add sp, #64
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %b, zeroinitializer
+ %xx = zext <16 x i8> %x to <16 x i16>
+ %yy = zext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ ret i32 %z
+}
+
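+; As above, but with sign extends on both the multiply inputs and the i16 product.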
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #32
+; CHECK-NEXT: sub sp, #32
+; CHECK-NEXT: vmov q3, q2
+; CHECK-NEXT: vmov q6, q0
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vcmp.i8 eq, q3, zr
+; CHECK-NEXT: vmov.i8 q5, #0xff
+; CHECK-NEXT: vmov q2, q1
+; CHECK-NEXT: vpsel q1, q5, q0
+; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.u8 r0, q1[0]
+; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[1]
+; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[2]
+; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[3]
+; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[4]
+; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[5]
+; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[6]
+; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[7]
+; CHECK-NEXT: vmov.16 q3[7], r0
+; CHECK-NEXT: vcmp.i16 ne, q3, zr
+; CHECK-NEXT: vpsel q3, q5, q0
+; CHECK-NEXT: vmov.u16 r0, q3[4]
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q3[5]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q3[6]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q3[7]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmov.u8 r0, q2[0]
+; CHECK-NEXT: vmov.16 q4[0], r0
+; CHECK-NEXT: vmov.u8 r0, q2[1]
+; CHECK-NEXT: vmov.16 q4[1], r0
+; CHECK-NEXT: vmov.u8 r0, q2[2]
+; CHECK-NEXT: vmov.16 q4[2], r0
+; CHECK-NEXT: vmov.u8 r0, q2[3]
+; CHECK-NEXT: vmov.16 q4[3], r0
+; CHECK-NEXT: vmov.u8 r0, q2[4]
+; CHECK-NEXT: vmov.16 q4[4], r0
+; CHECK-NEXT: vmov.u8 r0, q2[5]
+; CHECK-NEXT: vmov.16 q4[5], r0
+; CHECK-NEXT: vmov.u8 r0, q2[6]
+; CHECK-NEXT: vmov.16 q4[6], r0
+; CHECK-NEXT: vmov.u8 r0, q2[7]
+; CHECK-NEXT: vmov.16 q4[7], r0
+; CHECK-NEXT: vmov.u8 r0, q6[0]
+; CHECK-NEXT: vmov.16 q7[0], r0
+; CHECK-NEXT: vmov.u8 r0, q6[1]
+; CHECK-NEXT: vmov.16 q7[1], r0
+; CHECK-NEXT: vmov.u8 r0, q6[2]
+; CHECK-NEXT: vmov.16 q7[2], r0
+; CHECK-NEXT: vmov.u8 r0, q6[3]
+; CHECK-NEXT: vmov.16 q7[3], r0
+; CHECK-NEXT: vmov.u8 r0, q6[4]
+; CHECK-NEXT: vmov.16 q7[4], r0
+; CHECK-NEXT: vmov.u8 r0, q6[5]
+; CHECK-NEXT: vmov.16 q7[5], r0
+; CHECK-NEXT: vmov.u8 r0, q6[6]
+; CHECK-NEXT: vmov.16 q7[6], r0
+; CHECK-NEXT: vmov.u8 r0, q6[7]
+; CHECK-NEXT: vmov.16 q7[7], r0
+; CHECK-NEXT: vcmp.i32 ne, q0, zr
+; CHECK-NEXT: vmullb.s8 q4, q7, q4
+; CHECK-NEXT: vmov.u16 r0, q4[4]
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q4[5]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q4[6]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q4[7]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[8]
+; CHECK-NEXT: vmovlb.s16 q7, q0
+; CHECK-NEXT: vmov.i32 q0, #0x0
+; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vpsel q7, q7, q0
+; CHECK-NEXT: vmov.16 q0[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[9]
+; CHECK-NEXT: vmov.16 q0[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[10]
+; CHECK-NEXT: vmov.16 q0[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[11]
+; CHECK-NEXT: vmov.16 q0[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[12]
+; CHECK-NEXT: vmov.16 q0[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[13]
+; CHECK-NEXT: vmov.16 q0[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[14]
+; CHECK-NEXT: vmov.16 q0[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[15]
+; CHECK-NEXT: vmov.16 q0[7], r0
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
+; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT: vpsel q0, q5, q0
+; CHECK-NEXT: vmov.u16 r0, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q2[8]
+; CHECK-NEXT: vmov.16 q5[0], r0
+; CHECK-NEXT: vmov.u8 r0, q2[9]
+; CHECK-NEXT: vmov.16 q5[1], r0
+; CHECK-NEXT: vmov.u8 r0, q2[10]
+; CHECK-NEXT: vmov.16 q5[2], r0
+; CHECK-NEXT: vmov.u8 r0, q2[11]
+; CHECK-NEXT: vmov.16 q5[3], r0
+; CHECK-NEXT: vmov.u8 r0, q2[12]
+; CHECK-NEXT: vmov.16 q5[4], r0
+; CHECK-NEXT: vmov.u8 r0, q2[13]
+; CHECK-NEXT: vmov.16 q5[5], r0
+; CHECK-NEXT: vmov.u8 r0, q2[14]
+; CHECK-NEXT: vmov.16 q5[6], r0
+; CHECK-NEXT: vmov.u8 r0, q2[15]
+; CHECK-NEXT: vmov.16 q5[7], r0
+; CHECK-NEXT: vmov.u8 r0, q6[8]
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmov.16 q1[0], r0
+; CHECK-NEXT: vmov.u8 r0, q6[9]
+; CHECK-NEXT: vmov.16 q1[1], r0
+; CHECK-NEXT: vmov.u8 r0, q6[10]
+; CHECK-NEXT: vmov.16 q1[2], r0
+; CHECK-NEXT: vmov.u8 r0, q6[11]
+; CHECK-NEXT: vmov.16 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q6[12]
+; CHECK-NEXT: vmov.16 q1[4], r0
+; CHECK-NEXT: vmov.u8 r0, q6[13]
+; CHECK-NEXT: vmov.16 q1[5], r0
+; CHECK-NEXT: vmov.u8 r0, q6[14]
+; CHECK-NEXT: vmov.16 q1[6], r0
+; CHECK-NEXT: vmov.u8 r0, q6[15]
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmullb.s8 q1, q1, q5
+; CHECK-NEXT: vmov.u16 r0, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q3[0]
+; CHECK-NEXT: vmovlb.s16 q2, q2
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vaddt.i32 q7, q7, q2
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q3[1]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q3[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q3[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q4[0]
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q4[1]
+; CHECK-NEXT: vldrw.u32 q3, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q4[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q4[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vmovlb.s16 q2, q2
+; CHECK-NEXT: vpsel q2, q2, q3
+; CHECK-NEXT: vmov.32 q3[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r0
+; CHECK-NEXT: vmov.u16 r0, q1[0]
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[1]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[2]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[3]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmovlb.s16 q0, q0
+; CHECK-NEXT: vpt.i32 ne, q3, zr
+; CHECK-NEXT: vaddt.i32 q2, q2, q0
+; CHECK-NEXT: vadd.i32 q0, q2, q7
+; CHECK-NEXT: vaddv.u32 r0, q0
+; CHECK-NEXT: add sp, #32
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %b, zeroinitializer
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %yy = sext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = sext <16 x i16> %m to <16 x i32>
+ %s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
%z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
-define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
-; CHECK-LABEL: add_v16i8_v16i32_sext:
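+; Sign-extended v16i16 square of %x with the product zero-extended to v16i32; %y is unused.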
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_sextzext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_sextzext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vpt.i8 eq, q2, zr
-; CHECK-NEXT: vmlavt.s8 r0, q0, q1
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #32
+; CHECK-NEXT: sub sp, #32
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vcmp.i8 eq, q2, zr
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vpsel q1, q2, q0
+; CHECK-NEXT: vmov q3, q2
+; CHECK-NEXT: vmov.u8 r0, q1[0]
+; CHECK-NEXT: vstrw.32 q2, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.16 q2[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[1]
+; CHECK-NEXT: vmov.16 q2[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[2]
+; CHECK-NEXT: vmov.16 q2[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[3]
+; CHECK-NEXT: vmov.16 q2[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[4]
+; CHECK-NEXT: vmov.16 q2[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[5]
+; CHECK-NEXT: vmov.16 q2[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[6]
+; CHECK-NEXT: vmov.16 q2[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[7]
+; CHECK-NEXT: vmov.16 q2[7], r0
+; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vcmp.i16 ne, q2, zr
+; CHECK-NEXT: vmov.i32 q6, #0x0
+; CHECK-NEXT: vpsel q5, q3, q0
+; CHECK-NEXT: vmov q7, q6
+; CHECK-NEXT: vmov.u16 r0, q5[4]
+; CHECK-NEXT: vmov.i32 q2, #0xffff
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q5[5]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q5[6]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q5[7]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmov.u8 r0, q4[0]
+; CHECK-NEXT: vmov.16 q3[0], r0
+; CHECK-NEXT: vmov.u8 r0, q4[1]
+; CHECK-NEXT: vmov.16 q3[1], r0
+; CHECK-NEXT: vmov.u8 r0, q4[2]
+; CHECK-NEXT: vmov.16 q3[2], r0
+; CHECK-NEXT: vmov.u8 r0, q4[3]
+; CHECK-NEXT: vmov.16 q3[3], r0
+; CHECK-NEXT: vmov.u8 r0, q4[4]
+; CHECK-NEXT: vmov.16 q3[4], r0
+; CHECK-NEXT: vmov.u8 r0, q4[5]
+; CHECK-NEXT: vmov.16 q3[5], r0
+; CHECK-NEXT: vmov.u8 r0, q4[6]
+; CHECK-NEXT: vmov.16 q3[6], r0
+; CHECK-NEXT: vmov.u8 r0, q4[7]
+; CHECK-NEXT: vmov.16 q3[7], r0
+; CHECK-NEXT: vcmp.i32 ne, q0, zr
+; CHECK-NEXT: vmullb.s8 q3, q3, q3
+; CHECK-NEXT: vmov.u16 r0, q3[4]
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q3[5]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q3[6]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q3[7]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[8]
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q7, q0, q2
+; CHECK-NEXT: vmov.16 q0[0], r0
+; CHECK-NEXT: vmov.u8 r0, q1[9]
+; CHECK-NEXT: vmov.16 q0[1], r0
+; CHECK-NEXT: vmov.u8 r0, q1[10]
+; CHECK-NEXT: vmov.16 q0[2], r0
+; CHECK-NEXT: vmov.u8 r0, q1[11]
+; CHECK-NEXT: vmov.16 q0[3], r0
+; CHECK-NEXT: vmov.u8 r0, q1[12]
+; CHECK-NEXT: vmov.16 q0[4], r0
+; CHECK-NEXT: vmov.u8 r0, q1[13]
+; CHECK-NEXT: vmov.16 q0[5], r0
+; CHECK-NEXT: vmov.u8 r0, q1[14]
+; CHECK-NEXT: vmov.16 q0[6], r0
+; CHECK-NEXT: vmov.u8 r0, q1[15]
+; CHECK-NEXT: vmov.16 q0[7], r0
+; CHECK-NEXT: vldrw.u32 q1, [sp] @ 16-byte Reload
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
+; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: vmov.u16 r0, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q4[8]
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmov.16 q1[0], r0
+; CHECK-NEXT: vmov.u8 r0, q4[9]
+; CHECK-NEXT: vmov.16 q1[1], r0
+; CHECK-NEXT: vmov.u8 r0, q4[10]
+; CHECK-NEXT: vmov.16 q1[2], r0
+; CHECK-NEXT: vmov.u8 r0, q4[11]
+; CHECK-NEXT: vmov.16 q1[3], r0
+; CHECK-NEXT: vmov.u8 r0, q4[12]
+; CHECK-NEXT: vmov.16 q1[4], r0
+; CHECK-NEXT: vmov.u8 r0, q4[13]
+; CHECK-NEXT: vmov.16 q1[5], r0
+; CHECK-NEXT: vmov.u8 r0, q4[14]
+; CHECK-NEXT: vmov.16 q1[6], r0
+; CHECK-NEXT: vmov.u8 r0, q4[15]
+; CHECK-NEXT: vmov.16 q1[7], r0
+; CHECK-NEXT: vmullb.s8 q1, q1, q1
+; CHECK-NEXT: vmov.u16 r0, q1[4]
+; CHECK-NEXT: vmov.32 q4[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[5]
+; CHECK-NEXT: vmov.32 q4[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[6]
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[7]
+; CHECK-NEXT: vmov.32 q4[3], r0
+; CHECK-NEXT: vmov.u16 r0, q5[0]
+; CHECK-NEXT: vmovlb.u16 q4, q4
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vaddt.i32 q7, q7, q4
+; CHECK-NEXT: vmov.32 q4[0], r0
+; CHECK-NEXT: vmov.u16 r0, q5[1]
+; CHECK-NEXT: vmov.32 q4[1], r0
+; CHECK-NEXT: vmov.u16 r0, q5[2]
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vmov.u16 r0, q5[3]
+; CHECK-NEXT: vmov.32 q4[3], r0
+; CHECK-NEXT: vmov.u16 r0, q3[0]
+; CHECK-NEXT: vcmp.i32 ne, q4, zr
+; CHECK-NEXT: vmov.32 q4[0], r0
+; CHECK-NEXT: vmov.u16 r0, q3[1]
+; CHECK-NEXT: vmov.32 q4[1], r0
+; CHECK-NEXT: vmov.u16 r0, q3[2]
+; CHECK-NEXT: vmov.32 q4[2], r0
+; CHECK-NEXT: vmov.u16 r0, q3[3]
+; CHECK-NEXT: vmov.32 q4[3], r0
+; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q6, q4, q2
+; CHECK-NEXT: vmov.32 q2[0], r0
+; CHECK-NEXT: vmov.u16 r0, q0[1]
+; CHECK-NEXT: vmov.32 q2[1], r0
+; CHECK-NEXT: vmov.u16 r0, q0[2]
+; CHECK-NEXT: vmov.32 q2[2], r0
+; CHECK-NEXT: vmov.u16 r0, q0[3]
+; CHECK-NEXT: vmov.32 q2[3], r0
+; CHECK-NEXT: vmov.u16 r0, q1[0]
+; CHECK-NEXT: vmov.32 q0[0], r0
+; CHECK-NEXT: vmov.u16 r0, q1[1]
+; CHECK-NEXT: vmov.32 q0[1], r0
+; CHECK-NEXT: vmov.u16 r0, q1[2]
+; CHECK-NEXT: vmov.32 q0[2], r0
+; CHECK-NEXT: vmov.u16 r0, q1[3]
+; CHECK-NEXT: vmov.32 q0[3], r0
+; CHECK-NEXT: vmovlb.u16 q0, q0
+; CHECK-NEXT: vpt.i32 ne, q2, zr
+; CHECK-NEXT: vaddt.i32 q6, q6, q0
+; CHECK-NEXT: vadd.i32 q0, q6, q7
+; CHECK-NEXT: vaddv.u32 r0, q0
+; CHECK-NEXT: add sp, #32
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <16 x i8> %b, zeroinitializer
- %xx = sext <16 x i8> %x to <16 x i32>
- %yy = sext <16 x i8> %y to <16 x i32>
- %m = mul <16 x i32> %xx, %yy
- %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %m = mul <16 x i16> %xx, %xx
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
%z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -1642,27 +2734,517 @@ define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_zext(<8 x i16> %x, <8 x i16> %y,
; CHECK-NEXT: bx lr
entry:
%c = icmp eq <8 x i16> %b, zeroinitializer
- %xx = zext <8 x i16> %x to <8 x i64>
- %yy = zext <8 x i16> %y to <8 x i64>
- %m = mul <8 x i64> %xx, %yy
- %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+ %xx = zext <8 x i16> %x to <8 x i64>
+ %yy = zext <8 x i16> %y to <8 x i64>
+ %m = mul <8 x i64> %xx, %yy
+ %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
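+; Accumulating form of the v8i16 -> v8i64 sign-extending multiply reduction, selected to vmlalvat.s16.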
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vpt.i16 eq, q2, zr
+; CHECK-NEXT: vmlalvat.s16 r0, r1, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <8 x i16> %b, zeroinitializer
+ %xx = sext <8 x i16> %x to <8 x i64>
+ %yy = sext <8 x i16> %y to <8 x i64>
+ %m = mul <8 x i64> %xx, %yy
+ %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
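+; Accumulating reduction with a v8i16 -> v8i32 zero-extending multiply whose i32 product is zero-extended to i64.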
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_acc_zext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vmov.i8 q3, #0x0
+; CHECK-NEXT: vmov.i8 q4, #0xff
+; CHECK-NEXT: vcmp.i16 eq, q2, zr
+; CHECK-NEXT: vpsel q3, q4, q3
+; CHECK-NEXT: vmov.u16 r2, q3[0]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u16 r2, q3[1]
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov.u16 r2, q3[2]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.u16 r2, q3[3]
+; CHECK-NEXT: vmov.32 q2[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmrs r12, p0
+; CHECK-NEXT: and r3, r12, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q4[0], r3
+; CHECK-NEXT: vmov.32 q4[1], r3
+; CHECK-NEXT: ubfx r3, r12, #4, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q4[2], r3
+; CHECK-NEXT: vmov.32 q4[3], r3
+; CHECK-NEXT: vmov.u16 r3, q1[0]
+; CHECK-NEXT: vmov.32 q2[0], r3
+; CHECK-NEXT: vmov.u16 r3, q1[1]
+; CHECK-NEXT: vmov.32 q2[1], r3
+; CHECK-NEXT: vmov.u16 r3, q1[2]
+; CHECK-NEXT: vmov.32 q2[2], r3
+; CHECK-NEXT: vmov.u16 r3, q1[3]
+; CHECK-NEXT: vmov.32 q2[3], r3
+; CHECK-NEXT: vmov.u16 r3, q0[0]
+; CHECK-NEXT: vmov.32 q5[0], r3
+; CHECK-NEXT: vmov.u16 r3, q0[1]
+; CHECK-NEXT: vmov.32 q5[1], r3
+; CHECK-NEXT: vmov.u16 r3, q0[2]
+; CHECK-NEXT: vmov.32 q5[2], r3
+; CHECK-NEXT: vmov.u16 r3, q0[3]
+; CHECK-NEXT: vmov.32 q5[3], r3
+; CHECK-NEXT: vmullb.u16 q5, q5, q2
+; CHECK-NEXT: vmov.i64 q2, #0xffffffff
+; CHECK-NEXT: vmov.f32 s24, s20
+; CHECK-NEXT: vmov.f32 s26, s21
+; CHECK-NEXT: vand q6, q6, q2
+; CHECK-NEXT: vand q4, q6, q4
+; CHECK-NEXT: vmov.f32 s24, s22
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: vmov r4, s16
+; CHECK-NEXT: vmov lr, s19
+; CHECK-NEXT: vmov r2, s17
+; CHECK-NEXT: vmov.f32 s26, s23
+; CHECK-NEXT: vand q5, q6, q2
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: ubfx r4, r12, #8, #1
+; CHECK-NEXT: rsb.w r4, r4, #0
+; CHECK-NEXT: vmov.32 q4[0], r4
+; CHECK-NEXT: adc.w lr, lr, r2
+; CHECK-NEXT: vmov.32 q4[1], r4
+; CHECK-NEXT: ubfx r4, r12, #12, #1
+; CHECK-NEXT: rsbs r4, r4, #0
+; CHECK-NEXT: vmov.32 q4[2], r4
+; CHECK-NEXT: vmov.32 q4[3], r4
+; CHECK-NEXT: vand q4, q5, q4
+; CHECK-NEXT: vmov r2, s16
+; CHECK-NEXT: vmov r4, s17
+; CHECK-NEXT: adds.w r12, r3, r2
+; CHECK-NEXT: vmov r2, s18
+; CHECK-NEXT: adc.w r3, lr, r4
+; CHECK-NEXT: vmov r4, s19
+; CHECK-NEXT: adds.w r12, r12, r2
+; CHECK-NEXT: vmov.u16 r2, q3[4]
+; CHECK-NEXT: vmov.32 q4[0], r2
+; CHECK-NEXT: vmov.u16 r2, q3[5]
+; CHECK-NEXT: vmov.32 q4[1], r2
+; CHECK-NEXT: vmov.u16 r2, q3[6]
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vmov.u16 r2, q3[7]
+; CHECK-NEXT: vmov.32 q4[3], r2
+; CHECK-NEXT: adc.w lr, r3, r4
+; CHECK-NEXT: vcmp.i32 ne, q4, zr
+; CHECK-NEXT: vmrs r6, p0
+; CHECK-NEXT: and r4, r6, #1
+; CHECK-NEXT: rsbs r4, r4, #0
+; CHECK-NEXT: vmov.32 q3[0], r4
+; CHECK-NEXT: vmov.32 q3[1], r4
+; CHECK-NEXT: ubfx r4, r6, #4, #1
+; CHECK-NEXT: rsbs r4, r4, #0
+; CHECK-NEXT: vmov.32 q3[2], r4
+; CHECK-NEXT: vmov.32 q3[3], r4
+; CHECK-NEXT: vmov.u16 r4, q1[4]
+; CHECK-NEXT: vmov.32 q4[0], r4
+; CHECK-NEXT: vmov.u16 r4, q1[5]
+; CHECK-NEXT: vmov.32 q4[1], r4
+; CHECK-NEXT: vmov.u16 r4, q1[6]
+; CHECK-NEXT: vmov.32 q4[2], r4
+; CHECK-NEXT: vmov.u16 r4, q1[7]
+; CHECK-NEXT: vmov.32 q4[3], r4
+; CHECK-NEXT: vmov.u16 r4, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r4
+; CHECK-NEXT: vmov.u16 r4, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r4
+; CHECK-NEXT: vmov.u16 r4, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r4
+; CHECK-NEXT: vmov.u16 r4, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r4
+; CHECK-NEXT: vmullb.u16 q0, q1, q4
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vand q1, q1, q3
+; CHECK-NEXT: vmov.f32 s12, s2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r4, s5
+; CHECK-NEXT: vmov r5, s6
+; CHECK-NEXT: vmov r2, s7
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vand q0, q3, q2
+; CHECK-NEXT: adds.w r3, r3, r12
+; CHECK-NEXT: adc.w r4, r4, lr
+; CHECK-NEXT: adds r3, r3, r5
+; CHECK-NEXT: ubfx r5, r6, #8, #1
+; CHECK-NEXT: rsb.w r5, r5, #0
+; CHECK-NEXT: ubfx r6, r6, #12, #1
+; CHECK-NEXT: vmov.32 q1[0], r5
+; CHECK-NEXT: rsb.w r6, r6, #0
+; CHECK-NEXT: vmov.32 q1[1], r5
+; CHECK-NEXT: adcs r2, r4
+; CHECK-NEXT: vmov.32 q1[2], r6
+; CHECK-NEXT: vmov.32 q1[3], r6
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vmov r5, s0
+; CHECK-NEXT: vmov r6, s1
+; CHECK-NEXT: adds r3, r3, r5
+; CHECK-NEXT: vmov r5, s2
+; CHECK-NEXT: adcs r2, r6
+; CHECK-NEXT: vmov r6, s3
+; CHECK-NEXT: adds r3, r3, r5
+; CHECK-NEXT: adcs r2, r6
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+entry:
+ %c = icmp eq <8 x i16> %b, zeroinitializer
+ %xx = zext <8 x i16> %x to <8 x i32>
+ %yy = zext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = zext <8 x i32> %m to <8 x i64>
+ %s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
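+; The sign-extended form of the accumulating v8i16 -> v8i32 -> v8i64 reduction.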
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_acc_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vmov.u16 r2, q1[0]
+; CHECK-NEXT: vmov.i8 q6, #0xff
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[1]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[2]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[3]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[0]
+; CHECK-NEXT: vmov.32 q4[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[1]
+; CHECK-NEXT: vmov.32 q4[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[2]
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[3]
+; CHECK-NEXT: vmov.32 q4[3], r2
+; CHECK-NEXT: vcmp.i16 eq, q2, zr
+; CHECK-NEXT: vmullb.s16 q3, q4, q3
+; CHECK-NEXT: vmov.f32 s20, s12
+; CHECK-NEXT: vmov.f32 s22, s13
+; CHECK-NEXT: vmov r2, s20
+; CHECK-NEXT: vmov.32 q4[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q4[1], r2
+; CHECK-NEXT: vmov r2, s22
+; CHECK-NEXT: vmov.i8 q5, #0x0
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vpsel q2, q6, q5
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q4[3], r2
+; CHECK-NEXT: vmov.u16 r2, q2[0]
+; CHECK-NEXT: vmov.32 q5[0], r2
+; CHECK-NEXT: vmov.u16 r2, q2[1]
+; CHECK-NEXT: vmov.32 q5[1], r2
+; CHECK-NEXT: vmov.u16 r2, q2[2]
+; CHECK-NEXT: vmov.32 q5[2], r2
+; CHECK-NEXT: vmov.u16 r2, q2[3]
+; CHECK-NEXT: vmov.32 q5[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q5, zr
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q5[0], r3
+; CHECK-NEXT: vmov.32 q5[1], r3
+; CHECK-NEXT: ubfx r3, r2, #4, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q5[2], r3
+; CHECK-NEXT: vmov.32 q5[3], r3
+; CHECK-NEXT: vand q4, q4, q5
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: vmov r4, s16
+; CHECK-NEXT: vmov r12, s19
+; CHECK-NEXT: vmov r5, s17
+; CHECK-NEXT: vmov.f32 s16, s14
+; CHECK-NEXT: vmov.f32 s18, s15
+; CHECK-NEXT: adds.w lr, r4, r3
+; CHECK-NEXT: vmov r3, s16
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: adc.w r12, r12, r5
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q3[1], r3
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q3[3], r3
+; CHECK-NEXT: ubfx r3, r2, #8, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: vmov.32 q4[0], r3
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: vmov.32 q4[1], r3
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vmov.32 q4[3], r2
+; CHECK-NEXT: vand q3, q3, q4
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r2, s13
+; CHECK-NEXT: vmov r4, s14
+; CHECK-NEXT: vmov r5, s15
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r2, r2, r12
+; CHECK-NEXT: adds.w r12, r3, r4
+; CHECK-NEXT: adc.w r3, r2, r5
+; CHECK-NEXT: vmov.u16 r2, q1[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q1[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q1[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q1[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vmov.u16 r2, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmullb.s16 q0, q1, q3
+; CHECK-NEXT: vmov.f32 s12, s0
+; CHECK-NEXT: vmov.f32 s14, s1
+; CHECK-NEXT: vmov r2, s12
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov r2, s14
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov.u16 r2, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q3, zr
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r5, r2, #1
+; CHECK-NEXT: rsbs r5, r5, #0
+; CHECK-NEXT: vmov.32 q2[0], r5
+; CHECK-NEXT: vmov.32 q2[1], r5
+; CHECK-NEXT: ubfx r5, r2, #4, #1
+; CHECK-NEXT: rsbs r5, r5, #0
+; CHECK-NEXT: vmov.32 q2[2], r5
+; CHECK-NEXT: vmov.32 q2[3], r5
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: vmov r5, s5
+; CHECK-NEXT: adds.w r12, r12, r4
+; CHECK-NEXT: vmov r4, s6
+; CHECK-NEXT: adcs r5, r3
+; CHECK-NEXT: vmov r3, s7
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: adds.w r4, r4, r12
+; CHECK-NEXT: adc.w r12, r5, r3
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov.32 q0[0], r3
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q0[1], r3
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: vmov.32 q0[2], r3
+; CHECK-NEXT: asrs r3, r3, #31
+; CHECK-NEXT: vmov.32 q0[3], r3
+; CHECK-NEXT: ubfx r3, r2, #8, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: vmov.32 q1[0], r3
+; CHECK-NEXT: rsbs r2, r2, #0
+; CHECK-NEXT: vmov.32 q1[1], r3
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: vmov r5, s3
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: vmov r4, s2
+; CHECK-NEXT: adc.w r2, r2, r12
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: adcs r2, r5
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: pop {r4, r5, r7, pc}
+entry:
+ %c = icmp eq <8 x i16> %b, zeroinitializer
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %yy = sext <8 x i16> %y to <8 x i32>
+ %m = mul <8 x i32> %xx, %yy
+ %ma = sext <8 x i32> %m to <8 x i64>
+ %s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
%z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
-define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i64 %a) {
-; CHECK-LABEL: add_v8i16_v8i64_acc_sext:
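+; Accumulating form of the sign-extended v8i32 square with the product zero-extended to i64; %y is unused.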
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i32_v8i64_acc_sextzext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_v8i64_acc_sextzext:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vpt.i16 eq, q2, zr
-; CHECK-NEXT: vmlalvat.s16 r0, r1, q0, q1
-; CHECK-NEXT: bx lr
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vmov.i8 q1, #0x0
+; CHECK-NEXT: vmov.i8 q3, #0xff
+; CHECK-NEXT: vcmp.i16 eq, q2, zr
+; CHECK-NEXT: vpsel q2, q3, q1
+; CHECK-NEXT: vmov.u16 r2, q2[0]
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov.u16 r2, q2[1]
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q2[2]
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: vmov.u16 r2, q2[3]
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r3, r2, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q4[0], r3
+; CHECK-NEXT: vmov.32 q4[1], r3
+; CHECK-NEXT: ubfx r3, r2, #4, #1
+; CHECK-NEXT: rsbs r3, r3, #0
+; CHECK-NEXT: vmov.32 q4[2], r3
+; CHECK-NEXT: vmov.32 q4[3], r3
+; CHECK-NEXT: vmov.u16 r3, q0[0]
+; CHECK-NEXT: vmov.32 q1[0], r3
+; CHECK-NEXT: vmov.u16 r3, q0[1]
+; CHECK-NEXT: vmov.32 q1[1], r3
+; CHECK-NEXT: vmov.u16 r3, q0[2]
+; CHECK-NEXT: vmov.32 q1[2], r3
+; CHECK-NEXT: vmov.u16 r3, q0[3]
+; CHECK-NEXT: vmov.32 q1[3], r3
+; CHECK-NEXT: vmullb.s16 q3, q1, q1
+; CHECK-NEXT: vmov.i64 q1, #0xffffffff
+; CHECK-NEXT: vmov.f32 s20, s12
+; CHECK-NEXT: vmov.f32 s22, s13
+; CHECK-NEXT: vand q5, q5, q1
+; CHECK-NEXT: vand q4, q5, q4
+; CHECK-NEXT: vmov.f32 s20, s14
+; CHECK-NEXT: vmov r3, s18
+; CHECK-NEXT: vmov r4, s16
+; CHECK-NEXT: vmov r12, s19
+; CHECK-NEXT: vmov lr, s17
+; CHECK-NEXT: vmov.f32 s22, s15
+; CHECK-NEXT: vand q3, q5, q1
+; CHECK-NEXT: adds r5, r4, r3
+; CHECK-NEXT: ubfx r3, r2, #8, #1
+; CHECK-NEXT: rsb.w r3, r3, #0
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: vmov.32 q4[0], r3
+; CHECK-NEXT: rsb.w r2, r2, #0
+; CHECK-NEXT: vmov.32 q4[1], r3
+; CHECK-NEXT: adc.w r4, lr, r12
+; CHECK-NEXT: vmov.32 q4[2], r2
+; CHECK-NEXT: vmov.32 q4[3], r2
+; CHECK-NEXT: vand q3, q3, q4
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov r2, s13
+; CHECK-NEXT: adds r3, r3, r5
+; CHECK-NEXT: vmov r5, s15
+; CHECK-NEXT: adcs r2, r4
+; CHECK-NEXT: vmov r4, s14
+; CHECK-NEXT: adds.w r12, r3, r4
+; CHECK-NEXT: adc.w r3, r2, r5
+; CHECK-NEXT: vmov.u16 r2, q2[4]
+; CHECK-NEXT: vmov.32 q3[0], r2
+; CHECK-NEXT: vmov.u16 r2, q2[5]
+; CHECK-NEXT: vmov.32 q3[1], r2
+; CHECK-NEXT: vmov.u16 r2, q2[6]
+; CHECK-NEXT: vmov.32 q3[2], r2
+; CHECK-NEXT: vmov.u16 r2, q2[7]
+; CHECK-NEXT: vmov.32 q3[3], r2
+; CHECK-NEXT: vcmp.i32 ne, q3, zr
+; CHECK-NEXT: vmrs r2, p0
+; CHECK-NEXT: and r5, r2, #1
+; CHECK-NEXT: rsbs r5, r5, #0
+; CHECK-NEXT: vmov.32 q2[0], r5
+; CHECK-NEXT: vmov.32 q2[1], r5
+; CHECK-NEXT: ubfx r5, r2, #4, #1
+; CHECK-NEXT: rsbs r5, r5, #0
+; CHECK-NEXT: vmov.32 q2[2], r5
+; CHECK-NEXT: vmov.32 q2[3], r5
+; CHECK-NEXT: vmov.u16 r5, q0[4]
+; CHECK-NEXT: vmov.32 q3[0], r5
+; CHECK-NEXT: vmov.u16 r5, q0[5]
+; CHECK-NEXT: vmov.32 q3[1], r5
+; CHECK-NEXT: vmov.u16 r5, q0[6]
+; CHECK-NEXT: vmov.32 q3[2], r5
+; CHECK-NEXT: vmov.u16 r5, q0[7]
+; CHECK-NEXT: vmov.32 q3[3], r5
+; CHECK-NEXT: vmullb.s16 q0, q3, q3
+; CHECK-NEXT: vmov.f32 s12, s0
+; CHECK-NEXT: vmov.f32 s14, s1
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vand q2, q3, q2
+; CHECK-NEXT: vmov.f32 s12, s2
+; CHECK-NEXT: vmov r4, s8
+; CHECK-NEXT: vmov r5, s9
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vand q0, q3, q1
+; CHECK-NEXT: adds.w r4, r4, r12
+; CHECK-NEXT: adc.w r12, r3, r5
+; CHECK-NEXT: vmov r3, s10
+; CHECK-NEXT: vmov r5, s11
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: ubfx r4, r2, #8, #1
+; CHECK-NEXT: rsb.w r4, r4, #0
+; CHECK-NEXT: ubfx r2, r2, #12, #1
+; CHECK-NEXT: vmov.32 q2[0], r4
+; CHECK-NEXT: rsb.w r2, r2, #0
+; CHECK-NEXT: vmov.32 q2[1], r4
+; CHECK-NEXT: adc.w r5, r5, r12
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.32 q2[3], r2
+; CHECK-NEXT: vand q0, q0, q2
+; CHECK-NEXT: vmov r4, s0
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: vmov r4, s2
+; CHECK-NEXT: adcs r2, r5
+; CHECK-NEXT: vmov r5, s3
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: adcs r2, r5
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
%c = icmp eq <8 x i16> %b, zeroinitializer
- %xx = sext <8 x i16> %x to <8 x i64>
- %yy = sext <8 x i16> %y to <8 x i64>
- %m = mul <8 x i64> %xx, %yy
- %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %m = mul <8 x i32> %xx, %xx
+ %ma = zext <8 x i32> %m to <8 x i64>
+ %s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
%z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
@@ -1815,6 +3397,624 @@ entry:
ret i32 %r
}
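+; Accumulating v16i8 -> v16i16 -> v16i32 reduction with zero extends throughout.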
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_acc_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #64
+; CHECK-NEXT: sub sp, #64
+; CHECK-NEXT: vcmp.i8 eq, q2, zr
+; CHECK-NEXT: vmov.i8 q2, #0x0
+; CHECK-NEXT: vmov.i8 q7, #0xff
+; CHECK-NEXT: vmov q6, q1
+; CHECK-NEXT: vpsel q1, q7, q2
+; CHECK-NEXT: vstrw.32 q2, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.u8 r1, q1[0]
+; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[1]
+; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[2]
+; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[3]
+; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[4]
+; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[5]
+; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[6]
+; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[7]
+; CHECK-NEXT: vmov.16 q3[7], r1
+; CHECK-NEXT: vcmp.i16 ne, q3, zr
+; CHECK-NEXT: vpsel q3, q7, q2
+; CHECK-NEXT: vmov.u16 r1, q3[4]
+; CHECK-NEXT: vstrw.32 q3, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q3[5]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q3[6]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q3[7]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q6[0]
+; CHECK-NEXT: vmov.16 q4[0], r1
+; CHECK-NEXT: vmov.u8 r1, q6[1]
+; CHECK-NEXT: vmov.16 q4[1], r1
+; CHECK-NEXT: vmov.u8 r1, q6[2]
+; CHECK-NEXT: vmov.16 q4[2], r1
+; CHECK-NEXT: vmov.u8 r1, q6[3]
+; CHECK-NEXT: vmov.16 q4[3], r1
+; CHECK-NEXT: vmov.u8 r1, q6[4]
+; CHECK-NEXT: vmov.16 q4[4], r1
+; CHECK-NEXT: vmov.u8 r1, q6[5]
+; CHECK-NEXT: vmov.16 q4[5], r1
+; CHECK-NEXT: vmov.u8 r1, q6[6]
+; CHECK-NEXT: vmov.16 q4[6], r1
+; CHECK-NEXT: vmov.u8 r1, q6[7]
+; CHECK-NEXT: vmov.16 q4[7], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.16 q5[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.16 q5[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.16 q5[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.16 q5[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.16 q5[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.16 q5[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.16 q5[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.16 q5[7], r1
+; CHECK-NEXT: vmov q3, q0
+; CHECK-NEXT: vmullb.u8 q5, q5, q4
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmov.u16 r1, q5[4]
+; CHECK-NEXT: vmov.i32 q4, #0x0
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q5[5]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q5[6]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q5[7]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmov.i32 q2, #0xffff
+; CHECK-NEXT: vmov.u8 r1, q1[8]
+; CHECK-NEXT: vstrw.32 q4, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT: vstrw.32 q2, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q4, q0, q2
+; CHECK-NEXT: vmov.16 q0[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[9]
+; CHECK-NEXT: vmov.16 q0[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[10]
+; CHECK-NEXT: vmov.16 q0[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[11]
+; CHECK-NEXT: vmov.16 q0[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[12]
+; CHECK-NEXT: vmov.16 q0[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[13]
+; CHECK-NEXT: vmov.16 q0[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[14]
+; CHECK-NEXT: vmov.16 q0[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[15]
+; CHECK-NEXT: vmov.16 q0[7], r1
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
+; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT: vpsel q0, q7, q0
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q6[8]
+; CHECK-NEXT: vmov.16 q7[0], r1
+; CHECK-NEXT: vmov.u8 r1, q6[9]
+; CHECK-NEXT: vmov.16 q7[1], r1
+; CHECK-NEXT: vmov.u8 r1, q6[10]
+; CHECK-NEXT: vmov.16 q7[2], r1
+; CHECK-NEXT: vmov.u8 r1, q6[11]
+; CHECK-NEXT: vmov.16 q7[3], r1
+; CHECK-NEXT: vmov.u8 r1, q6[12]
+; CHECK-NEXT: vmov.16 q7[4], r1
+; CHECK-NEXT: vmov.u8 r1, q6[13]
+; CHECK-NEXT: vmov.16 q7[5], r1
+; CHECK-NEXT: vmov.u8 r1, q6[14]
+; CHECK-NEXT: vmov.16 q7[6], r1
+; CHECK-NEXT: vmov.u8 r1, q6[15]
+; CHECK-NEXT: vmov.16 q7[7], r1
+; CHECK-NEXT: vmov.u8 r1, q3[8]
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q3[9]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q3[10]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q3[11]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q3[12]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q3[13]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q3[14]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q3[15]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmullb.u8 q1, q1, q7
+; CHECK-NEXT: vmov.u16 r1, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmovlb.u16 q2, q2
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vaddt.i32 q4, q4, q2
+; CHECK-NEXT: vldrw.u32 q3, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT: vmov.u16 r1, q3[0]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q3[1]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q3[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q3[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q5[0]
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q5[1]
+; CHECK-NEXT: vldrw.u32 q3, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q5[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q5[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vldrw.u32 q5, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q3, q2, q5
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q1[0]
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[1]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[2]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[3]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmov q1, q3
+; CHECK-NEXT: vmovlb.u16 q0, q0
+; CHECK-NEXT: vpt.i32 ne, q2, zr
+; CHECK-NEXT: vaddt.i32 q1, q3, q0
+; CHECK-NEXT: vadd.i32 q0, q1, q4
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: add sp, #64
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %b, zeroinitializer
+ %xx = zext <16 x i8> %x to <16 x i16>
+ %yy = zext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
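+; As above, but sign-extending both the multiply inputs and the i16 product.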
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_acc_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #32
+; CHECK-NEXT: sub sp, #32
+; CHECK-NEXT: vmov q3, q2
+; CHECK-NEXT: vmov q6, q0
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vcmp.i8 eq, q3, zr
+; CHECK-NEXT: vmov.i8 q5, #0xff
+; CHECK-NEXT: vmov q2, q1
+; CHECK-NEXT: vpsel q1, q5, q0
+; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.u8 r1, q1[0]
+; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[1]
+; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[2]
+; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[3]
+; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[4]
+; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[5]
+; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[6]
+; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[7]
+; CHECK-NEXT: vmov.16 q3[7], r1
+; CHECK-NEXT: vcmp.i16 ne, q3, zr
+; CHECK-NEXT: vpsel q3, q5, q0
+; CHECK-NEXT: vmov.u16 r1, q3[4]
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q3[5]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q3[6]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q3[7]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmov.u8 r1, q2[0]
+; CHECK-NEXT: vmov.16 q4[0], r1
+; CHECK-NEXT: vmov.u8 r1, q2[1]
+; CHECK-NEXT: vmov.16 q4[1], r1
+; CHECK-NEXT: vmov.u8 r1, q2[2]
+; CHECK-NEXT: vmov.16 q4[2], r1
+; CHECK-NEXT: vmov.u8 r1, q2[3]
+; CHECK-NEXT: vmov.16 q4[3], r1
+; CHECK-NEXT: vmov.u8 r1, q2[4]
+; CHECK-NEXT: vmov.16 q4[4], r1
+; CHECK-NEXT: vmov.u8 r1, q2[5]
+; CHECK-NEXT: vmov.16 q4[5], r1
+; CHECK-NEXT: vmov.u8 r1, q2[6]
+; CHECK-NEXT: vmov.16 q4[6], r1
+; CHECK-NEXT: vmov.u8 r1, q2[7]
+; CHECK-NEXT: vmov.16 q4[7], r1
+; CHECK-NEXT: vmov.u8 r1, q6[0]
+; CHECK-NEXT: vmov.16 q7[0], r1
+; CHECK-NEXT: vmov.u8 r1, q6[1]
+; CHECK-NEXT: vmov.16 q7[1], r1
+; CHECK-NEXT: vmov.u8 r1, q6[2]
+; CHECK-NEXT: vmov.16 q7[2], r1
+; CHECK-NEXT: vmov.u8 r1, q6[3]
+; CHECK-NEXT: vmov.16 q7[3], r1
+; CHECK-NEXT: vmov.u8 r1, q6[4]
+; CHECK-NEXT: vmov.16 q7[4], r1
+; CHECK-NEXT: vmov.u8 r1, q6[5]
+; CHECK-NEXT: vmov.16 q7[5], r1
+; CHECK-NEXT: vmov.u8 r1, q6[6]
+; CHECK-NEXT: vmov.16 q7[6], r1
+; CHECK-NEXT: vmov.u8 r1, q6[7]
+; CHECK-NEXT: vmov.16 q7[7], r1
+; CHECK-NEXT: vcmp.i32 ne, q0, zr
+; CHECK-NEXT: vmullb.s8 q4, q7, q4
+; CHECK-NEXT: vmov.u16 r1, q4[4]
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q4[5]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q4[6]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q4[7]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[8]
+; CHECK-NEXT: vmovlb.s16 q7, q0
+; CHECK-NEXT: vmov.i32 q0, #0x0
+; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vpsel q7, q7, q0
+; CHECK-NEXT: vmov.16 q0[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[9]
+; CHECK-NEXT: vmov.16 q0[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[10]
+; CHECK-NEXT: vmov.16 q0[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[11]
+; CHECK-NEXT: vmov.16 q0[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[12]
+; CHECK-NEXT: vmov.16 q0[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[13]
+; CHECK-NEXT: vmov.16 q0[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[14]
+; CHECK-NEXT: vmov.16 q0[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[15]
+; CHECK-NEXT: vmov.16 q0[7], r1
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
+; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
+; CHECK-NEXT: vpsel q0, q5, q0
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q2[8]
+; CHECK-NEXT: vmov.16 q5[0], r1
+; CHECK-NEXT: vmov.u8 r1, q2[9]
+; CHECK-NEXT: vmov.16 q5[1], r1
+; CHECK-NEXT: vmov.u8 r1, q2[10]
+; CHECK-NEXT: vmov.16 q5[2], r1
+; CHECK-NEXT: vmov.u8 r1, q2[11]
+; CHECK-NEXT: vmov.16 q5[3], r1
+; CHECK-NEXT: vmov.u8 r1, q2[12]
+; CHECK-NEXT: vmov.16 q5[4], r1
+; CHECK-NEXT: vmov.u8 r1, q2[13]
+; CHECK-NEXT: vmov.16 q5[5], r1
+; CHECK-NEXT: vmov.u8 r1, q2[14]
+; CHECK-NEXT: vmov.16 q5[6], r1
+; CHECK-NEXT: vmov.u8 r1, q2[15]
+; CHECK-NEXT: vmov.16 q5[7], r1
+; CHECK-NEXT: vmov.u8 r1, q6[8]
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q6[9]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q6[10]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q6[11]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q6[12]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q6[13]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q6[14]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q6[15]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmullb.s8 q1, q1, q5
+; CHECK-NEXT: vmov.u16 r1, q1[4]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[5]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[6]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[7]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q3[0]
+; CHECK-NEXT: vmovlb.s16 q2, q2
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vaddt.i32 q7, q7, q2
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q3[1]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q3[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q3[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q4[0]
+; CHECK-NEXT: vcmp.i32 ne, q2, zr
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q4[1]
+; CHECK-NEXT: vldrw.u32 q3, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q4[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q4[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmovlb.s16 q2, q2
+; CHECK-NEXT: vpsel q2, q2, q3
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u16 r1, q1[0]
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[1]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[2]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[3]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmovlb.s16 q0, q0
+; CHECK-NEXT: vpt.i32 ne, q3, zr
+; CHECK-NEXT: vaddt.i32 q2, q2, q0
+; CHECK-NEXT: vadd.i32 q0, q2, q7
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: add sp, #32
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %b, zeroinitializer
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %yy = sext <16 x i8> %y to <16 x i16>
+ %m = mul <16 x i16> %xx, %yy
+ %ma = sext <16 x i16> %m to <16 x i32>
+ %s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
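+; Accumulating variant of the v16i8 sextzext test above.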
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i16_v16i32_acc_sextzext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_v16i32_acc_sextzext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: .pad #32
+; CHECK-NEXT: sub sp, #32
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vcmp.i8 eq, q2, zr
+; CHECK-NEXT: vmov.i8 q2, #0xff
+; CHECK-NEXT: vmov.i8 q0, #0x0
+; CHECK-NEXT: vpsel q1, q2, q0
+; CHECK-NEXT: vmov q3, q2
+; CHECK-NEXT: vmov.u8 r1, q1[0]
+; CHECK-NEXT: vstrw.32 q2, [sp] @ 16-byte Spill
+; CHECK-NEXT: vmov.16 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[1]
+; CHECK-NEXT: vmov.16 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[2]
+; CHECK-NEXT: vmov.16 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[3]
+; CHECK-NEXT: vmov.16 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[4]
+; CHECK-NEXT: vmov.16 q2[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[5]
+; CHECK-NEXT: vmov.16 q2[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[6]
+; CHECK-NEXT: vmov.16 q2[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[7]
+; CHECK-NEXT: vmov.16 q2[7], r1
+; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT: vcmp.i16 ne, q2, zr
+; CHECK-NEXT: vmov.i32 q6, #0x0
+; CHECK-NEXT: vpsel q5, q3, q0
+; CHECK-NEXT: vmov q7, q6
+; CHECK-NEXT: vmov.u16 r1, q5[4]
+; CHECK-NEXT: vmov.i32 q2, #0xffff
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q5[5]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q5[6]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q5[7]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmov.u8 r1, q4[0]
+; CHECK-NEXT: vmov.16 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q4[1]
+; CHECK-NEXT: vmov.16 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q4[2]
+; CHECK-NEXT: vmov.16 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q4[3]
+; CHECK-NEXT: vmov.16 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q4[4]
+; CHECK-NEXT: vmov.16 q3[4], r1
+; CHECK-NEXT: vmov.u8 r1, q4[5]
+; CHECK-NEXT: vmov.16 q3[5], r1
+; CHECK-NEXT: vmov.u8 r1, q4[6]
+; CHECK-NEXT: vmov.16 q3[6], r1
+; CHECK-NEXT: vmov.u8 r1, q4[7]
+; CHECK-NEXT: vmov.16 q3[7], r1
+; CHECK-NEXT: vcmp.i32 ne, q0, zr
+; CHECK-NEXT: vmullb.s8 q3, q3, q3
+; CHECK-NEXT: vmov.u16 r1, q3[4]
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q3[5]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q3[6]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q3[7]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[8]
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q7, q0, q2
+; CHECK-NEXT: vmov.16 q0[0], r1
+; CHECK-NEXT: vmov.u8 r1, q1[9]
+; CHECK-NEXT: vmov.16 q0[1], r1
+; CHECK-NEXT: vmov.u8 r1, q1[10]
+; CHECK-NEXT: vmov.16 q0[2], r1
+; CHECK-NEXT: vmov.u8 r1, q1[11]
+; CHECK-NEXT: vmov.16 q0[3], r1
+; CHECK-NEXT: vmov.u8 r1, q1[12]
+; CHECK-NEXT: vmov.16 q0[4], r1
+; CHECK-NEXT: vmov.u8 r1, q1[13]
+; CHECK-NEXT: vmov.16 q0[5], r1
+; CHECK-NEXT: vmov.u8 r1, q1[14]
+; CHECK-NEXT: vmov.16 q0[6], r1
+; CHECK-NEXT: vmov.u8 r1, q1[15]
+; CHECK-NEXT: vmov.16 q0[7], r1
+; CHECK-NEXT: vldrw.u32 q1, [sp] @ 16-byte Reload
+; CHECK-NEXT: vcmp.i16 ne, q0, zr
+; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vpsel q0, q1, q0
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q4[8]
+; CHECK-NEXT: vcmp.i32 ne, q1, zr
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q4[9]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q4[10]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q4[11]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q4[12]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q4[13]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q4[14]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q4[15]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmullb.s8 q1, q1, q1
+; CHECK-NEXT: vmov.u16 r1, q1[4]
+; CHECK-NEXT: vmov.32 q4[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[5]
+; CHECK-NEXT: vmov.32 q4[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[6]
+; CHECK-NEXT: vmov.32 q4[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[7]
+; CHECK-NEXT: vmov.32 q4[3], r1
+; CHECK-NEXT: vmov.u16 r1, q5[0]
+; CHECK-NEXT: vmovlb.u16 q4, q4
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vaddt.i32 q7, q7, q4
+; CHECK-NEXT: vmov.32 q4[0], r1
+; CHECK-NEXT: vmov.u16 r1, q5[1]
+; CHECK-NEXT: vmov.32 q4[1], r1
+; CHECK-NEXT: vmov.u16 r1, q5[2]
+; CHECK-NEXT: vmov.32 q4[2], r1
+; CHECK-NEXT: vmov.u16 r1, q5[3]
+; CHECK-NEXT: vmov.32 q4[3], r1
+; CHECK-NEXT: vmov.u16 r1, q3[0]
+; CHECK-NEXT: vcmp.i32 ne, q4, zr
+; CHECK-NEXT: vmov.32 q4[0], r1
+; CHECK-NEXT: vmov.u16 r1, q3[1]
+; CHECK-NEXT: vmov.32 q4[1], r1
+; CHECK-NEXT: vmov.u16 r1, q3[2]
+; CHECK-NEXT: vmov.32 q4[2], r1
+; CHECK-NEXT: vmov.u16 r1, q3[3]
+; CHECK-NEXT: vmov.32 q4[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vandt q6, q4, q2
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u16 r1, q1[0]
+; CHECK-NEXT: vmov.32 q0[0], r1
+; CHECK-NEXT: vmov.u16 r1, q1[1]
+; CHECK-NEXT: vmov.32 q0[1], r1
+; CHECK-NEXT: vmov.u16 r1, q1[2]
+; CHECK-NEXT: vmov.32 q0[2], r1
+; CHECK-NEXT: vmov.u16 r1, q1[3]
+; CHECK-NEXT: vmov.32 q0[3], r1
+; CHECK-NEXT: vmovlb.u16 q0, q0
+; CHECK-NEXT: vpt.i32 ne, q2, zr
+; CHECK-NEXT: vaddt.i32 q6, q6, q0
+; CHECK-NEXT: vadd.i32 q0, q6, q7
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: add sp, #32
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
+entry:
+ %c = icmp eq <16 x i8> %b, zeroinitializer
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %m = mul <16 x i16> %xx, %xx
+ %ma = zext <16 x i16> %m to <16 x i32>
+ %s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_zext(<4 x i8> %x, <4 x i8> %y, <4 x i8> %b, i32 %a) {
; CHECK-LABEL: add_v4i8_v4i32_acc_zext:
; CHECK: @ %bb.0: @ %entry