[llvm] 5bb4954 - [ARM] Extra vecreduce add with accumulate tests. NFC
Author: David Green
Date: 2020-02-12T17:59:53Z
New Revision: 5bb4954008a6354a54ae225ba37f2788109f218a
URL: https://github.com/llvm/llvm-project/commit/5bb4954008a6354a54ae225ba37f2788109f218a
DIFF: https://github.com/llvm/llvm-project/commit/5bb4954008a6354a54ae225ba37f2788109f218a.diff
LOG: [ARM] Extra vecreduce add with accumulate tests. NFC
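(For context: every test added below follows the same shape. A vector, possibly
widened first with zext/sext, is reduced to a scalar with one of the
llvm.experimental.vector.reduce.add intrinsics, and the scalar result is then
added to an incoming accumulator argument - the pattern MVE can fold into a
single VADDVA/VADDLVA instead of a VADDV plus a scalar add. A minimal sketch of
that pattern, with an illustrative function name that is not part of the patch:

define i32 @reduce_then_accumulate(<4 x i32> %x, i32 %acc) {
entry:
  ; Horizontal add across all four i32 lanes of %x.
  %sum = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %x)
  ; Fold the incoming accumulator into the reduction result; per the CHECK
  ; lines below, this pair selects to "vaddva.u32 r0, q0" on MVE.
  %r = add i32 %sum, %acc
  ret i32 %r
}
declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
)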
Added:
Modified:
llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
index 85f1c96a48cb..65aec16793b9 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
@@ -211,10 +211,11 @@ entry:
ret i32 %z
}
-define arm_aapcs_vfpcc i16 @add_v8i16_v8i16(<8 x i16> %x) {
+define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16(<8 x i16> %x) {
; CHECK-LABEL: add_v8i16_v8i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vaddv.u16 r0, q0
+; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
%z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %x)
@@ -527,7 +528,7 @@ entry:
ret i32 %z
}
-define arm_aapcs_vfpcc i16 @add_v16i8_v16i16_zext(<16 x i8> %x) {
+define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_zext(<16 x i8> %x) {
; CHECK-LABEL: add_v16i8_v16i16_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.u8 r0, q0[8]
@@ -566,6 +567,7 @@ define arm_aapcs_vfpcc i16 @add_v16i8_v16i16_zext(<16 x i8> %x) {
; CHECK-NEXT: vmovlb.u8 q0, q2
; CHECK-NEXT: vadd.i16 q0, q0, q1
; CHECK-NEXT: vaddv.u16 r0, q0
+; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
%xx = zext <16 x i8> %x to <16 x i16>
@@ -573,7 +575,7 @@ entry:
ret i16 %z
}
-define arm_aapcs_vfpcc i16 @add_v16i8_v16i16_sext(<16 x i8> %x) {
+define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_sext(<16 x i8> %x) {
; CHECK-LABEL: add_v16i8_v16i16_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmov.u8 r0, q0[8]
@@ -612,6 +614,7 @@ define arm_aapcs_vfpcc i16 @add_v16i8_v16i16_sext(<16 x i8> %x) {
; CHECK-NEXT: vmovlb.s8 q0, q2
; CHECK-NEXT: vadd.i16 q0, q0, q1
; CHECK-NEXT: vaddv.u16 r0, q0
+; CHECK-NEXT: sxth r0, r0
; CHECK-NEXT: bx lr
entry:
%xx = sext <16 x i8> %x to <16 x i16>
@@ -619,11 +622,12 @@ entry:
ret i16 %z
}
-define arm_aapcs_vfpcc i16 @add_v8i8_v8i16_zext(<8 x i8> %x) {
+define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_zext(<8 x i8> %x) {
; CHECK-LABEL: add_v8i8_v8i16_zext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.u8 q0, q0
; CHECK-NEXT: vaddv.u16 r0, q0
+; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
%xx = zext <8 x i8> %x to <8 x i16>
@@ -631,11 +635,12 @@ entry:
ret i16 %z
}
-define arm_aapcs_vfpcc i16 @add_v8i8_v8i16_sext(<8 x i8> %x) {
+define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_sext(<8 x i8> %x) {
; CHECK-LABEL: add_v8i8_v8i16_sext:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vmovlb.s8 q0, q0
; CHECK-NEXT: vaddv.u16 r0, q0
+; CHECK-NEXT: sxth r0, r0
; CHECK-NEXT: bx lr
entry:
%xx = sext <8 x i8> %x to <8 x i16>
@@ -643,10 +648,11 @@ entry:
ret i16 %z
}
-define arm_aapcs_vfpcc i8 @add_v16i8_v16i8(<16 x i8> %x) {
+define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8(<16 x i8> %x) {
; CHECK-LABEL: add_v16i8_v16i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vaddv.u8 r0, q0
+; CHECK-NEXT: uxtb r0, r0
; CHECK-NEXT: bx lr
entry:
%z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %x)
@@ -960,6 +966,1041 @@ entry:
ret i64 %z
}
+define arm_aapcs_vfpcc i32 @add_v4i32_v4i32_acc(<4 x i32> %x, i32 %a) {
+; CHECK-LABEL: add_v4i32_v4i32_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %x)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_zext(<4 x i32> %x, i64 %a) {
+; CHECK-LABEL: add_v4i32_v4i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: adr r2, .LCPI29_0
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vldrw.u32 q2, [r2]
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vand q1, q1, q2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r12, s7
+; CHECK-NEXT: vmov lr, s5
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: vand q0, q1, q2
+; CHECK-NEXT: adds r4, r3, r2
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: adc.w r12, r12, lr
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, r12, r4
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: pop {r4, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI29_0:
+; CHECK-NEXT: .long 4294967295 @ 0xffffffff
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 4294967295 @ 0xffffffff
+; CHECK-NEXT: .long 0 @ 0x0
+entry:
+ %xx = zext <4 x i32> %x to <4 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_sext(<4 x i32> %x, i64 %a) {
+; CHECK-LABEL: add_v4i32_v4i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: vmov.f32 s4, s0
+; CHECK-NEXT: vmov.f32 s6, s1
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q2[1], r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vmov.f32 s4, s2
+; CHECK-NEXT: vmov.f32 s6, s3
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q2[3], r3
+; CHECK-NEXT: vmov lr, s10
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r12, s9
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov r2, s4
+; CHECK-NEXT: adds r3, r3, r2
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds r3, r3, r2
+; CHECK-NEXT: adc.w r2, r12, r2, asr #31
+; CHECK-NEXT: adds r0, r0, r3
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: pop {r7, pc}
+entry:
+ %xx = sext <4 x i32> %x to <4 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_acc_zext(<2 x i32> %x, i64 %a) {
+; CHECK-LABEL: add_v2i32_v2i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: adr r2, .LCPI31_0
+; CHECK-NEXT: vldrw.u32 q1, [r2]
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r12, s3
+; CHECK-NEXT: vmov lr, s1
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, lr, r12
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: pop {r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI31_0:
+; CHECK-NEXT: .long 4294967295 @ 0xffffffff
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 4294967295 @ 0xffffffff
+; CHECK-NEXT: .long 0 @ 0x0
+entry:
+ %xx = zext <2 x i32> %x to <2 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_acc_sext(<2 x i32> %x, i64 %a) {
+; CHECK-LABEL: add_v2i32_v2i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov r12, s4
+; CHECK-NEXT: adds.w r12, r12, r3
+; CHECK-NEXT: adc.w r2, r2, r3, asr #31
+; CHECK-NEXT: adds.w r0, r0, r12
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <2 x i32> %x to <2 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_zext(<8 x i16> %x, i32 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmovlb.u16 q1, q1
+; CHECK-NEXT: vmovlb.u16 q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <8 x i16> %x to <8 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_sext(<8 x i16> %x, i32 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u16 r1, q0[4]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[5]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[6]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[7]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u16 r1, q0[0]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u16 r1, q0[1]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u16 r1, q0[2]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u16 r1, q0[3]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmovlb.s16 q1, q1
+; CHECK-NEXT: vmovlb.s16 q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <8 x i16> %x to <8 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_zext(<4 x i16> %x, i32 %a) {
+; CHECK-LABEL: add_v4i16_v4i32_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovlb.u16 q0, q0
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <4 x i16> %x to <4 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_sext(<4 x i16> %x, i32 %a) {
+; CHECK-LABEL: add_v4i16_v4i32_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovlb.s16 q0, q0
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <4 x i16> %x to <4 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16_acc(<8 x i16> %x, i16 %a) {
+; CHECK-LABEL: add_v8i16_v8i16_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vaddva.u16 r0, q0
+; CHECK-NEXT: uxth r0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %x)
+ %r = add i16 %z, %a
+ ret i16 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_zext(<8 x i16> %x, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vmov.u16 r2, q0[0]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[1]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: adr r2, .LCPI38_0
+; CHECK-NEXT: vldrw.u32 q1, [r2]
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov.u16 r3, q0[2]
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: vmov.u16 r3, q0[3]
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov r3, s14
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov.u16 r3, q0[4]
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: vmov.u16 r3, q0[5]
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov lr, s15
+; CHECK-NEXT: add.w r12, r2, r3
+; CHECK-NEXT: vmov r3, s14
+; CHECK-NEXT: vmov r2, s11
+; CHECK-NEXT: adds.w r4, r12, r3
+; CHECK-NEXT: adc.w r12, r2, lr
+; CHECK-NEXT: vmov.u16 r2, q0[6]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u16 r2, q0[7]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vand q0, q2, q1
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r2, s1
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, r12, r4
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: pop {r4, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI38_0:
+; CHECK-NEXT: .long 65535 @ 0xffff
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 65535 @ 0xffff
+; CHECK-NEXT: .long 0 @ 0x0
+entry:
+ %xx = zext <8 x i16> %x to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_sext(<8 x i16> %x, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vmov.u16 r2, q0[0]
+; CHECK-NEXT: sxth r2, r2
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[1]
+; CHECK-NEXT: sxth r2, r2
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q1[3], r3
+; CHECK-NEXT: vmov lr, s6
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r12, s5
+; CHECK-NEXT: adds.w lr, lr, r3
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov.u16 r2, q0[2]
+; CHECK-NEXT: sxth r2, r2
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u16 r2, q0[3]
+; CHECK-NEXT: sxth r2, r2
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q1[3], r3
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: vmov r3, s5
+; CHECK-NEXT: adds.w r4, r4, lr
+; CHECK-NEXT: adc.w r12, r12, r3
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: adds.w lr, r4, r3
+; CHECK-NEXT: vmov.u16 r4, q0[4]
+; CHECK-NEXT: sxth r4, r4
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov.32 q1[0], r4
+; CHECK-NEXT: asrs r4, r4, #31
+; CHECK-NEXT: vmov.32 q1[1], r4
+; CHECK-NEXT: vmov.u16 r4, q0[5]
+; CHECK-NEXT: sxth r4, r4
+; CHECK-NEXT: vmov.32 q1[2], r4
+; CHECK-NEXT: asrs r2, r4, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, r12, r4, asr #31
+; CHECK-NEXT: vmov.u16 r4, q0[6]
+; CHECK-NEXT: sxth r4, r4
+; CHECK-NEXT: adds r2, r2, r4
+; CHECK-NEXT: adc.w r3, r3, r4, asr #31
+; CHECK-NEXT: vmov.u16 r4, q0[7]
+; CHECK-NEXT: sxth r4, r4
+; CHECK-NEXT: adds r2, r2, r4
+; CHECK-NEXT: adc.w r3, r3, r4, asr #31
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: pop {r4, pc}
+entry:
+ %xx = sext <8 x i16> %x to <8 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_zext(<2 x i16> %x, i64 %a) {
+; CHECK-LABEL: add_v2i16_v2i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: adr r2, .LCPI40_0
+; CHECK-NEXT: vldrw.u32 q1, [r2]
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov r3, s3
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI40_0:
+; CHECK-NEXT: .long 65535 @ 0xffff
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 65535 @ 0xffff
+; CHECK-NEXT: .long 0 @ 0x0
+entry:
+ %xx = zext <2 x i16> %x to <2 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_sext(<2 x i16> %x, i64 %a) {
+; CHECK-LABEL: add_v2i16_v2i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: sxth r2, r2
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: sxth r3, r3
+; CHECK-NEXT: vmov r12, s4
+; CHECK-NEXT: adds.w r12, r12, r3
+; CHECK-NEXT: adc.w r2, r2, r3, asr #31
+; CHECK-NEXT: adds.w r0, r0, r12
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <2 x i16> %x to <2 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_zext(<16 x i8> %x, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i32_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vmov.u8 r1, q0[12]
+; CHECK-NEXT: vmov.i32 q1, #0xff
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[13]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[14]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[15]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vmov.u8 r1, q0[8]
+; CHECK-NEXT: vadd.i32 q2, q3, q2
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[9]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[10]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[11]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.32 q4[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.32 q4[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.32 q4[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.32 q4[3], r1
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vand q0, q4, q1
+; CHECK-NEXT: vadd.i32 q0, q0, q3
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <16 x i8> %x to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_sext(<16 x i8> %x, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i32_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u8 r1, q0[12]
+; CHECK-NEXT: vmov.32 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[13]
+; CHECK-NEXT: vmov.32 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[14]
+; CHECK-NEXT: vmov.32 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[15]
+; CHECK-NEXT: vmov.32 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmovlb.s8 q1, q1
+; CHECK-NEXT: vmovlb.s8 q2, q2
+; CHECK-NEXT: vmovlb.s16 q1, q1
+; CHECK-NEXT: vmovlb.s16 q2, q2
+; CHECK-NEXT: vmov.u8 r1, q0[8]
+; CHECK-NEXT: vadd.i32 q1, q2, q1
+; CHECK-NEXT: vmov.32 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[9]
+; CHECK-NEXT: vmov.32 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[10]
+; CHECK-NEXT: vmov.32 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[11]
+; CHECK-NEXT: vmov.32 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.32 q3[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.32 q3[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.32 q3[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.32 q3[3], r1
+; CHECK-NEXT: vmovlb.s8 q2, q2
+; CHECK-NEXT: vmovlb.s8 q0, q3
+; CHECK-NEXT: vmovlb.s16 q2, q2
+; CHECK-NEXT: vmovlb.s16 q0, q0
+; CHECK-NEXT: vadd.i32 q0, q0, q2
+; CHECK-NEXT: vadd.i32 q0, q0, q1
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <16 x i8> %x to <16 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_zext(<4 x i8> %x, i32 %a) {
+; CHECK-LABEL: add_v4i8_v4i32_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.i32 q1, #0xff
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <4 x i8> %x to <4 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_sext(<4 x i8> %x, i32 %a) {
+; CHECK-LABEL: add_v4i8_v4i32_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovlb.s8 q0, q0
+; CHECK-NEXT: vmovlb.s16 q0, q0
+; CHECK-NEXT: vaddva.u32 r0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <4 x i8> %x to <4 x i32>
+ %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %r = add i32 %z, %a
+ ret i32 %r
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_acc_zext(<16 x i8> %x, i16 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u8 r1, q0[8]
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[9]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[10]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[11]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[12]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[13]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[14]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[15]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.16 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.16 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.16 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.16 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.16 q2[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.16 q2[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.16 q2[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.16 q2[7], r1
+; CHECK-NEXT: vmovlb.u8 q1, q1
+; CHECK-NEXT: vmovlb.u8 q0, q2
+; CHECK-NEXT: vadd.i16 q0, q0, q1
+; CHECK-NEXT: vaddva.u16 r0, q0
+; CHECK-NEXT: uxth r0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <16 x i8> %x to <16 x i16>
+ %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %xx)
+ %r = add i16 %z, %a
+ ret i16 %r
+}
+
+define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_acc_sext(<16 x i8> %x, i16 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov.u8 r1, q0[8]
+; CHECK-NEXT: vmov.16 q1[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[9]
+; CHECK-NEXT: vmov.16 q1[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[10]
+; CHECK-NEXT: vmov.16 q1[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[11]
+; CHECK-NEXT: vmov.16 q1[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[12]
+; CHECK-NEXT: vmov.16 q1[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[13]
+; CHECK-NEXT: vmov.16 q1[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[14]
+; CHECK-NEXT: vmov.16 q1[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[15]
+; CHECK-NEXT: vmov.16 q1[7], r1
+; CHECK-NEXT: vmov.u8 r1, q0[0]
+; CHECK-NEXT: vmov.16 q2[0], r1
+; CHECK-NEXT: vmov.u8 r1, q0[1]
+; CHECK-NEXT: vmov.16 q2[1], r1
+; CHECK-NEXT: vmov.u8 r1, q0[2]
+; CHECK-NEXT: vmov.16 q2[2], r1
+; CHECK-NEXT: vmov.u8 r1, q0[3]
+; CHECK-NEXT: vmov.16 q2[3], r1
+; CHECK-NEXT: vmov.u8 r1, q0[4]
+; CHECK-NEXT: vmov.16 q2[4], r1
+; CHECK-NEXT: vmov.u8 r1, q0[5]
+; CHECK-NEXT: vmov.16 q2[5], r1
+; CHECK-NEXT: vmov.u8 r1, q0[6]
+; CHECK-NEXT: vmov.16 q2[6], r1
+; CHECK-NEXT: vmov.u8 r1, q0[7]
+; CHECK-NEXT: vmov.16 q2[7], r1
+; CHECK-NEXT: vmovlb.s8 q1, q1
+; CHECK-NEXT: vmovlb.s8 q0, q2
+; CHECK-NEXT: vadd.i16 q0, q0, q1
+; CHECK-NEXT: vaddva.u16 r0, q0
+; CHECK-NEXT: sxth r0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <16 x i8> %x to <16 x i16>
+ %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %xx)
+ %r = add i16 %z, %a
+ ret i16 %r
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_acc_zext(<8 x i8> %x, i16 %a) {
+; CHECK-LABEL: add_v8i8_v8i16_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovlb.u8 q0, q0
+; CHECK-NEXT: vaddva.u16 r0, q0
+; CHECK-NEXT: uxth r0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = zext <8 x i8> %x to <8 x i16>
+ %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %xx)
+ %r = add i16 %z, %a
+ ret i16 %r
+}
+
+define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_acc_sext(<8 x i8> %x, i16 %a) {
+; CHECK-LABEL: add_v8i8_v8i16_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmovlb.s8 q0, q0
+; CHECK-NEXT: vaddva.u16 r0, q0
+; CHECK-NEXT: sxth r0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <8 x i8> %x to <8 x i16>
+ %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %xx)
+ %r = add i16 %z, %a
+ ret i16 %r
+}
+
+define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8_acc(<16 x i8> %x, i8 %a) {
+; CHECK-LABEL: add_v16i8_v16i8_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vaddva.u8 r0, q0
+; CHECK-NEXT: uxtb r0, r0
+; CHECK-NEXT: bx lr
+entry:
+ %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %x)
+ %r = add i8 %z, %a
+ ret i8 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, i64 %a) {
+; CHECK-LABEL: add_v16i8_v16i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vmov.u8 r2, q0[0]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u8 r2, q0[1]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: adr r2, .LCPI51_0
+; CHECK-NEXT: vldrw.u32 q1, [r2]
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov.u8 r3, q0[2]
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: vmov.u8 r3, q0[3]
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov r3, s14
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov.u8 r3, q0[4]
+; CHECK-NEXT: vmov.32 q3[0], r3
+; CHECK-NEXT: vmov.u8 r3, q0[5]
+; CHECK-NEXT: vmov.32 q3[2], r3
+; CHECK-NEXT: vand q3, q3, q1
+; CHECK-NEXT: vmov r3, s12
+; CHECK-NEXT: vmov lr, s15
+; CHECK-NEXT: add.w r12, r2, r3
+; CHECK-NEXT: vmov r3, s14
+; CHECK-NEXT: vmov r2, s11
+; CHECK-NEXT: adds.w r4, r12, r3
+; CHECK-NEXT: adc.w r12, r2, lr
+; CHECK-NEXT: vmov.u8 r2, q0[6]
+; CHECK-NEXT: vmov.32 q2[0], r2
+; CHECK-NEXT: vmov.u8 r2, q0[7]
+; CHECK-NEXT: vmov.32 q2[2], r2
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vmov r3, s8
+; CHECK-NEXT: vmov r2, s9
+; CHECK-NEXT: adds r3, r3, r4
+; CHECK-NEXT: vmov r4, s11
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: adds.w lr, r3, r2
+; CHECK-NEXT: adc.w r3, r12, r4
+; CHECK-NEXT: vmov.u8 r4, q0[8]
+; CHECK-NEXT: vmov.32 q2[0], r4
+; CHECK-NEXT: vmov.u8 r4, q0[9]
+; CHECK-NEXT: vmov.32 q2[2], r4
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov r4, s9
+; CHECK-NEXT: adds.w r12, lr, r2
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov r4, s11
+; CHECK-NEXT: adds.w r12, r12, r2
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov.u8 r4, q0[10]
+; CHECK-NEXT: vmov.32 q2[0], r4
+; CHECK-NEXT: vmov.u8 r4, q0[11]
+; CHECK-NEXT: vmov.32 q2[2], r4
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov r4, s9
+; CHECK-NEXT: adds.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov r4, s11
+; CHECK-NEXT: adds.w r12, r12, r2
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov.u8 r4, q0[12]
+; CHECK-NEXT: vmov.32 q2[0], r4
+; CHECK-NEXT: vmov.u8 r4, q0[13]
+; CHECK-NEXT: vmov.32 q2[2], r4
+; CHECK-NEXT: vand q2, q2, q1
+; CHECK-NEXT: vmov r2, s8
+; CHECK-NEXT: vmov r4, s9
+; CHECK-NEXT: adds.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s10
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov r4, s11
+; CHECK-NEXT: adds.w r12, r12, r2
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov.u8 r4, q0[14]
+; CHECK-NEXT: vmov.32 q2[0], r4
+; CHECK-NEXT: vmov.u8 r4, q0[15]
+; CHECK-NEXT: vmov.32 q2[2], r4
+; CHECK-NEXT: vand q0, q2, q1
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov r4, s1
+; CHECK-NEXT: adds.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: vmov r4, s3
+; CHECK-NEXT: adds.w r2, r2, r12
+; CHECK-NEXT: adcs r3, r4
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: pop {r4, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI51_0:
+; CHECK-NEXT: .long 255 @ 0xff
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 255 @ 0xff
+; CHECK-NEXT: .long 0 @ 0x0
+entry:
+ %xx = zext <16 x i8> %x to <16 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, i64 %a) {
+; CHECK-LABEL: add_v16i8_v16i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vmov.u8 r2, q0[0]
+; CHECK-NEXT: sxtb r2, r2
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u8 r2, q0[1]
+; CHECK-NEXT: sxtb r2, r2
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q1[3], r3
+; CHECK-NEXT: vmov lr, s6
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r12, s5
+; CHECK-NEXT: adds.w lr, lr, r3
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov.u8 r2, q0[2]
+; CHECK-NEXT: sxtb r2, r2
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: vmov.u8 r2, q0[3]
+; CHECK-NEXT: sxtb r2, r2
+; CHECK-NEXT: vmov.32 q1[2], r2
+; CHECK-NEXT: asrs r3, r2, #31
+; CHECK-NEXT: vmov.32 q1[3], r3
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: vmov r3, s5
+; CHECK-NEXT: adds.w r4, r4, lr
+; CHECK-NEXT: adc.w r12, r12, r3
+; CHECK-NEXT: vmov r3, s6
+; CHECK-NEXT: adds.w lr, r4, r3
+; CHECK-NEXT: vmov.u8 r4, q0[4]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: adc.w r12, r12, r2, asr #31
+; CHECK-NEXT: vmov.32 q1[0], r4
+; CHECK-NEXT: asrs r4, r4, #31
+; CHECK-NEXT: vmov.32 q1[1], r4
+; CHECK-NEXT: vmov.u8 r4, q0[5]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[2], r4
+; CHECK-NEXT: asrs r2, r4, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds.w lr, r3, r2
+; CHECK-NEXT: adc.w r12, r12, r4, asr #31
+; CHECK-NEXT: vmov.u8 r4, q0[6]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[0], r4
+; CHECK-NEXT: asrs r4, r4, #31
+; CHECK-NEXT: vmov.32 q1[1], r4
+; CHECK-NEXT: vmov.u8 r4, q0[7]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[2], r4
+; CHECK-NEXT: asrs r2, r4, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds.w lr, r3, r2
+; CHECK-NEXT: adc.w r12, r12, r4, asr #31
+; CHECK-NEXT: vmov.u8 r4, q0[8]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[0], r4
+; CHECK-NEXT: asrs r4, r4, #31
+; CHECK-NEXT: vmov.32 q1[1], r4
+; CHECK-NEXT: vmov.u8 r4, q0[9]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[2], r4
+; CHECK-NEXT: asrs r2, r4, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds.w lr, r3, r2
+; CHECK-NEXT: adc.w r12, r12, r4, asr #31
+; CHECK-NEXT: vmov.u8 r4, q0[10]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[0], r4
+; CHECK-NEXT: asrs r4, r4, #31
+; CHECK-NEXT: vmov.32 q1[1], r4
+; CHECK-NEXT: vmov.u8 r4, q0[11]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[2], r4
+; CHECK-NEXT: asrs r2, r4, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds.w lr, r3, r2
+; CHECK-NEXT: adc.w r12, r12, r4, asr #31
+; CHECK-NEXT: vmov.u8 r4, q0[12]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[0], r4
+; CHECK-NEXT: asrs r4, r4, #31
+; CHECK-NEXT: vmov.32 q1[1], r4
+; CHECK-NEXT: vmov.u8 r4, q0[13]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: vmov.32 q1[2], r4
+; CHECK-NEXT: asrs r2, r4, #31
+; CHECK-NEXT: vmov.32 q1[3], r2
+; CHECK-NEXT: vmov r3, s4
+; CHECK-NEXT: vmov r2, s5
+; CHECK-NEXT: adds.w r3, r3, lr
+; CHECK-NEXT: adc.w r12, r12, r2
+; CHECK-NEXT: vmov r2, s6
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, r12, r4, asr #31
+; CHECK-NEXT: vmov.u8 r4, q0[14]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: adds r2, r2, r4
+; CHECK-NEXT: adc.w r3, r3, r4, asr #31
+; CHECK-NEXT: vmov.u8 r4, q0[15]
+; CHECK-NEXT: sxtb r4, r4
+; CHECK-NEXT: adds r2, r2, r4
+; CHECK-NEXT: adc.w r3, r3, r4, asr #31
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: pop {r4, pc}
+entry:
+ %xx = sext <16 x i8> %x to <16 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_acc_zext(<2 x i8> %x, i64 %a) {
+; CHECK-LABEL: add_v2i8_v2i64_acc_zext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: adr r2, .LCPI53_0
+; CHECK-NEXT: vldrw.u32 q1, [r2]
+; CHECK-NEXT: vand q0, q0, q1
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: add r2, r3
+; CHECK-NEXT: vmov r3, s3
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: bx lr
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: .LCPI53_0:
+; CHECK-NEXT: .long 255 @ 0xff
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 255 @ 0xff
+; CHECK-NEXT: .long 0 @ 0x0
+entry:
+ %xx = zext <2 x i8> %x to <2 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_acc_sext(<2 x i8> %x, i64 %a) {
+; CHECK-LABEL: add_v2i8_v2i64_acc_sext:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov r2, s0
+; CHECK-NEXT: vmov r3, s2
+; CHECK-NEXT: sxtb r2, r2
+; CHECK-NEXT: vmov.32 q1[0], r2
+; CHECK-NEXT: asrs r2, r2, #31
+; CHECK-NEXT: vmov.32 q1[1], r2
+; CHECK-NEXT: sxtb r3, r3
+; CHECK-NEXT: vmov r12, s4
+; CHECK-NEXT: adds.w r12, r12, r3
+; CHECK-NEXT: adc.w r2, r2, r3, asr #31
+; CHECK-NEXT: adds.w r0, r0, r12
+; CHECK-NEXT: adcs r1, r2
+; CHECK-NEXT: bx lr
+entry:
+ %xx = sext <2 x i8> %x to <2 x i64>
+ %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i64_v2i64_acc(<2 x i64> %x, i64 %a) {
+; CHECK-LABEL: add_v2i64_v2i64_acc:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r3, s0
+; CHECK-NEXT: vmov r12, s3
+; CHECK-NEXT: vmov lr, s1
+; CHECK-NEXT: adds r2, r2, r3
+; CHECK-NEXT: adc.w r3, lr, r12
+; CHECK-NEXT: adds r0, r0, r2
+; CHECK-NEXT: adcs r1, r3
+; CHECK-NEXT: pop {r7, pc}
+entry:
+ %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %x)
+ %r = add i64 %z, %a
+ ret i64 %r
+}
+
declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)