[llvm] 0ebdc3b - [ARM] Predicated MVE reduction tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 20 22:48:16 PDT 2020


Author: David Green
Date: 2020-07-21T06:47:48+01:00
New Revision: 0ebdc3be417f4833395281fda8be6df47f73935c

URL: https://github.com/llvm/llvm-project/commit/0ebdc3be417f4833395281fda8be6df47f73935c
DIFF: https://github.com/llvm/llvm-project/commit/0ebdc3be417f4833395281fda8be6df47f73935c.diff

LOG: [ARM] Predicated MVE reduction tests. NFC
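
These tests exercise vector.reduce.add of a vector that has been
predicated by a select between the (optionally sign- or zero-extended)
input and zero. A representative pattern, copied from the first added
test below:

  %c = icmp eq <4 x i32> %b, zeroinitializer
  %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)

(mve-vecreduce-mlapred.ll, whose diff is not shown here, presumably
applies the same predication to reductions of a multiply, per its name.)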

Added: 
    llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
new file mode 100644
index 000000000000..b137f139a99c
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
@@ -0,0 +1,4063 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc i32 @add_v4i32_v4i32(<4 x i32> %x, <4 x i32> %b) {
+; CHECK-LABEL: add_v4i32_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_zext(<4 x i32> %x, <4 x i32> %b) {
+; CHECK-LABEL: add_v4i32_v4i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.f32 s12, s0
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vmov.f32 s14, s1
+; CHECK-NEXT:    vmov.i64 q2, #0xffffffff
+; CHECK-NEXT:    vand q3, q3, q2
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vand q1, q3, q1
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    vand q0, q1, q2
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsb.w r0, r0, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r1
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = zext <4 x i32> %x to <4 x i64>
+  %s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_sext(<4 x i32> %x, <4 x i32> %b) {
+; CHECK-LABEL: add_v4i32_v4i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.f32 s12, s0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.f32 s14, s1
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adc.w r2, r3, r12
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q0[3], r3
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r1
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = sext <4 x i32> %x to <4 x i64>
+  %s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_zext(<2 x i32> %x, <2 x i32> %b) {
+; CHECK-LABEL: add_v2i32_v2i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.i64 q2, #0xffffffff
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = zext <2 x i32> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_sext(<2 x i32> %x, <2 x i32> %b) {
+; CHECK-LABEL: add_v2i32_v2i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q2, q0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = sext <2 x i32> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_zext(<8 x i16> %x, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q1, q3, q2
+; CHECK-NEXT:    vmov.u16 r0, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[1]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmovlb.u16 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmovlb.u16 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q3
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i32>
+  %s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_sext(<8 x i16> %x, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q1, q3, q2
+; CHECK-NEXT:    vmov.u16 r0, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[1]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmovlb.s16 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmovlb.s16 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q3
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i32>
+  %s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_zext(<4 x i16> %x, <4 x i16> %b) {
+; CHECK-LABEL: add_v4i16_v4i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = zext <4 x i16> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_sext(<4 x i16> %x, <4 x i16> %b) {
+; CHECK-LABEL: add_v4i16_v4i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = sext <4 x i16> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16(<8 x i16> %x, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_zext(<8 x i16> %x, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u16 r0, q2[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-NEXT:    vmov.i64 q1, #0xffff
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r1, s15
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    add r2, r3
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    adds.w r12, r2, r0
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vand q3, q3, q1
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vand q0, q3, q1
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i64>
+  %s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_sext(<8 x i16> %x, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q1, q3, q2
+; CHECK-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    sxth r1, r1
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    sxth r1, r1
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmov r12, s11
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adc.w r2, r3, r12
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    adds.w r12, r1, r0
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vmov.u16 r2, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vmov.32 q1[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i64>
+  %s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_zext(<2 x i16> %x, <2 x i16> %b) {
+; CHECK-LABEL: add_v2i16_v2i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i64 q2, #0xffff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vand q0, q0, q3
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    add r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = zext <2 x i16> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_sext(<2 x i16> %x, <2 x i16> %b) {
+; CHECK-LABEL: add_v2i16_v2i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xffff
+; CHECK-NEXT:    vand q2, q1, q2
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = sext <2 x i16> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_zext(<16 x i8> %x, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q7, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vpsel q6, q4, q7
+; CHECK-NEXT:    vmov.i32 q5, #0xff
+; CHECK-NEXT:    vmov.u8 r0, q6[8]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[9]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[10]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[11]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[12]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[13]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[14]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[15]
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q3, q4, q7
+; CHECK-NEXT:    vmov.u16 r0, q3[4]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[12]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[13]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[14]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[15]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[0]
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[1]
+; CHECK-NEXT:    vmov.16 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[2]
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[3]
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[4]
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[5]
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[6]
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[7]
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q4, q4, q7
+; CHECK-NEXT:    vmov.u16 r0, q4[4]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[4]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[0]
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vadd.i32 q0, q0, q6
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[1]
+; CHECK-NEXT:    vmov.32 q6[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[2]
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[3]
+; CHECK-NEXT:    vmov.32 q6[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[8]
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[9]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[10]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[11]
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[0]
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[1]
+; CHECK-NEXT:    vand q3, q3, q5
+; CHECK-NEXT:    vmov.32 q6[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[2]
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[3]
+; CHECK-NEXT:    vmov.32 q6[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[0]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[1]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[2]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[3]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vpsel q3, q3, q1
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vand q2, q4, q5
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vadd.i32 q1, q1, q3
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i32>
+  %s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_sext(<16 x i8> %x, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vpsel q5, q4, q3
+; CHECK-NEXT:    vmov.u8 r0, q5[8]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[9]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[10]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[11]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[12]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[13]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[14]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[15]
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    vmov.u16 r0, q2[4]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[5]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[6]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[7]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[0]
+; CHECK-NEXT:    vmov.16 q7[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[1]
+; CHECK-NEXT:    vmov.16 q7[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[2]
+; CHECK-NEXT:    vmov.16 q7[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[3]
+; CHECK-NEXT:    vmov.16 q7[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[4]
+; CHECK-NEXT:    vmov.16 q7[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[5]
+; CHECK-NEXT:    vmov.16 q7[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[6]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmov.16 q7[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[7]
+; CHECK-NEXT:    vmovlb.s16 q6, q1
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vmov.16 q7[7], r0
+; CHECK-NEXT:    vpsel q6, q6, q1
+; CHECK-NEXT:    vcmp.i16 ne, q7, zr
+; CHECK-NEXT:    vpsel q3, q4, q3
+; CHECK-NEXT:    vmov.u16 r0, q3[4]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[5]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[6]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[7]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[0]
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[1]
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[2]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmovlb.s8 q4, q4
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmovlb.s16 q4, q4
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[0]
+; CHECK-NEXT:    vpsel q4, q4, q1
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[1]
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[2]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[3]
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmovlb.s8 q0, q3
+; CHECK-NEXT:    vpsel q2, q2, q1
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vadd.i32 q4, q4, q6
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vadd.i32 q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q4
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i32>
+  %s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_zext(<4 x i8> %x, <4 x i8> %b) {
+; CHECK-LABEL: add_v4i8_v4i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = zext <4 x i8> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_sext(<4 x i8> %x, <4 x i8> %b) {
+; CHECK-LABEL: add_v4i8_v4i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = sext <4 x i8> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_zext(<16 x i8> %x, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i16_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-NEXT:    vmov.16 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-NEXT:    vmov.16 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    vmov.16 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.16 q4[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[4]
+; CHECK-NEXT:    vmov.16 q4[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.16 q4[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.16 q4[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.16 q4[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmovlb.u8 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vmovlb.u8 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i16 q0, q0, q3
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i16>
+  %s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_sext(<16 x i8> %x, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i16_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-NEXT:    vmov.16 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-NEXT:    vmov.16 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    vmov.16 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.16 q4[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[4]
+; CHECK-NEXT:    vmov.16 q4[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.16 q4[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.16 q4[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.16 q4[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmovlb.s8 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vmovlb.s8 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i16 q0, q0, q3
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i16>
+  %s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_zext(<8 x i8> %x, <8 x i8> %b) {
+; CHECK-LABEL: add_v8i8_v8i16_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = zext <8 x i8> %x to <8 x i16>
+  %s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_sext(<8 x i8> %x, <8 x i8> %b) {
+; CHECK-LABEL: add_v8i8_v8i16_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = sext <8 x i8> %x to <8 x i16>
+  %s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8(<16 x i8> %x, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
+  %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vpsel q4, q3, q2
+; CHECK-NEXT:    vmov.u8 r0, q4[0]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[1]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[2]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[3]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[4]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[5]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[6]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[7]
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-NEXT:    vpsel q5, q3, q2
+; CHECK-NEXT:    vmov.u16 r0, q5[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[3]
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-NEXT:    vmov.i64 q1, #0xff
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q6[0], r1
+; CHECK-NEXT:    vmov.32 q6[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q6[2], r1
+; CHECK-NEXT:    vmov.32 q6[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmov.32 q7[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q7[2], r1
+; CHECK-NEXT:    vand q7, q7, q1
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r1, s27
+; CHECK-NEXT:    vmov r2, s25
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s26
+; CHECK-NEXT:    add r2, r3
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.32 q6[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q7[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.32 q7[2], r0
+; CHECK-NEXT:    vand q7, q7, q1
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov r0, s25
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s27
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s26
+; CHECK-NEXT:    adds.w r12, r2, r0
+; CHECK-NEXT:    vmov.u16 r2, q5[4]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[5]
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[6]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[7]
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q5[2], r3
+; CHECK-NEXT:    vmov.32 q5[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[4]
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[5]
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vand q6, q6, q1
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s22
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vand q6, q6, q1
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov r2, s21
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s23
+; CHECK-NEXT:    adds.w r12, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q4[8]
+; CHECK-NEXT:    vmov.16 q5[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[9]
+; CHECK-NEXT:    vmov.16 q5[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[10]
+; CHECK-NEXT:    vmov.16 q5[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[11]
+; CHECK-NEXT:    vmov.16 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[12]
+; CHECK-NEXT:    vmov.16 q5[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[13]
+; CHECK-NEXT:    vmov.16 q5[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[14]
+; CHECK-NEXT:    vmov.16 q5[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[15]
+; CHECK-NEXT:    vmov.16 q5[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u16 r2, q2[0]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[1]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[2]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[8]
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[9]
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    adds.w r12, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[12]
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[13]
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vand q3, q3, q1
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vand q0, q3, q1
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i64>
+  %s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    vpsel q3, q2, q1
+; CHECK-NEXT:    vmov.u8 r0, q3[0]
+; CHECK-NEXT:    vmov.16 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[1]
+; CHECK-NEXT:    vmov.16 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[2]
+; CHECK-NEXT:    vmov.16 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[3]
+; CHECK-NEXT:    vmov.16 q4[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[4]
+; CHECK-NEXT:    vmov.16 q4[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[5]
+; CHECK-NEXT:    vmov.16 q4[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[6]
+; CHECK-NEXT:    vmov.16 q4[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[7]
+; CHECK-NEXT:    vmov.16 q4[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q4, q2, q1
+; CHECK-NEXT:    vmov.u16 r0, q4[0]
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[1]
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[2]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[3]
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q5[0], r1
+; CHECK-NEXT:    vmov.32 q5[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q5[2], r1
+; CHECK-NEXT:    vmov.32 q5[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    sxtb r1, r1
+; CHECK-NEXT:    vmov.32 q6[0], r1
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov.32 q6[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    sxtb r1, r1
+; CHECK-NEXT:    vmov.32 q6[2], r1
+; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    vmov.32 q6[3], r1
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r1, s22
+; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vmov r12, s23
+; CHECK-NEXT:    vmov r3, s21
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adc.w r2, r3, r12
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q6[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q6[3], r0
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov r0, s21
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s22
+; CHECK-NEXT:    adds.w r12, r1, r0
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vmov.u16 r2, q4[4]
+; CHECK-NEXT:    vmov.32 q5[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[5]
+; CHECK-NEXT:    vmov.32 q5[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[6]
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[7]
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vmov.32 q4[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[4]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[5]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q5[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q5[3], r3
+; CHECK-NEXT:    vand q4, q5, q4
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vmov r3, s17
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s19
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q5[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q5[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vand q4, q5, q4
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    vmov r2, s17
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s19
+; CHECK-NEXT:    adds.w r12, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[8]
+; CHECK-NEXT:    vmov.16 q4[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[9]
+; CHECK-NEXT:    vmov.16 q4[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[10]
+; CHECK-NEXT:    vmov.16 q4[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[11]
+; CHECK-NEXT:    vmov.16 q4[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[12]
+; CHECK-NEXT:    vmov.16 q4[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[13]
+; CHECK-NEXT:    vmov.16 q4[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[14]
+; CHECK-NEXT:    vmov.16 q4[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[15]
+; CHECK-NEXT:    vmov.16 q4[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vmov.u16 r2, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[8]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[9]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmov r3, s9
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s11
+; CHECK-NEXT:    adds.w r12, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vmov.32 q1[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[12]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[13]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r3, s5
+; CHECK-NEXT:    adds.w r12, r12, r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    adds.w r0, r0, r12
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i64>
+  %s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_zext(<2 x i8> %x, <2 x i8> %b) {
+; CHECK-LABEL: add_v2i8_v2i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i64 q2, #0xff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vand q0, q0, q3
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    add r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = zext <2 x i8> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_sext(<2 x i8> %x, <2 x i8> %b) {
+; CHECK-LABEL: add_v2i8_v2i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vand q2, q1, q2
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    asrs r0, r0, #31
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = sext <2 x i8> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i64_v2i64(<2 x i64> %x, <2 x i64> %b) {
+; CHECK-LABEL: add_v2i64_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s5
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s6
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s7
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i64> %b, zeroinitializer
+  %s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i32_v4i32_acc(<4 x i32> %x, <4 x i32> %b, i32 %a) {
+; CHECK-LABEL: add_v4i32_v4i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_zext(<4 x i32> %x, <4 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v4i32_v4i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.f32 s12, s0
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    vmov.f32 s14, s1
+; CHECK-NEXT:    vmov.i64 q2, #0xffffffff
+; CHECK-NEXT:    vand q3, q3, q2
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vmov.32 q1[3], r3
+; CHECK-NEXT:    vand q1, q3, q1
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov r4, s4
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    vmov lr, s5
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    vand q0, q1, q2
+; CHECK-NEXT:    adds r5, r4, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    adc.w r4, lr, r12
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r3, r3, r5
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adcs r2, r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = zext <4 x i32> %x to <4 x i64>
+  %s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_sext(<4 x i32> %x, <4 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v4i32_v4i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vmov.f32 s12, s0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.f32 s14, s1
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vmov.32 q1[3], r3
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov r4, s4
+; CHECK-NEXT:    vmov r12, s7
+; CHECK-NEXT:    vmov r5, s5
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    adds.w lr, r4, r3
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    adc.w r12, r12, r5
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q0[3], r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adds.w r3, r3, lr
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = sext <4 x i32> %x to <4 x i64>
+  %s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_acc_zext(<2 x i32> %x, <2 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v2i32_v2i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.i64 q2, #0xffffffff
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = zext <2 x i32> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_acc_sext(<2 x i32> %x, <2 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v2i32_v2i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vand q0, q2, q0
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = sext <2 x i32> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_zext(<8 x i16> %x, <8 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q1, q3, q2
+; CHECK-NEXT:    vmov.u16 r1, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[2]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[3]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmovlb.u16 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmovlb.u16 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q3
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i32>
+  %s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_sext(<8 x i16> %x, <8 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q1, q3, q2
+; CHECK-NEXT:    vmov.u16 r1, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[2]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[3]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmovlb.s16 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmovlb.s16 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q3
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i32>
+  %s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_zext(<4 x i16> %x, <4 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v4i16_v4i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = zext <4 x i16> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_sext(<4 x i16> %x, <4 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v4i16_v4i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = sext <4 x i16> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16_acc(<8 x i16> %x, <8 x i16> %b, i16 %a) {
+; CHECK-LABEL: add_v8i16_v8i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_zext(<8 x i16> %x, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u16 r2, q2[0]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[1]
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[2]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-NEXT:    vmov.i64 q1, #0xffff
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r12, s15
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov lr, s14
+; CHECK-NEXT:    orr.w r12, r12, r3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    add lr, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    adds.w r4, lr, r3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    adc.w lr, r12, r2
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    adds.w r12, r4, r3
+; CHECK-NEXT:    adc.w lr, lr, r2
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r4, r2, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q2[0], r4
+; CHECK-NEXT:    vmov.32 q2[1], r4
+; CHECK-NEXT:    ubfx r4, r2, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q2[2], r4
+; CHECK-NEXT:    vmov.32 q2[3], r4
+; CHECK-NEXT:    vmov.u16 r4, q0[4]
+; CHECK-NEXT:    vmov.32 q3[0], r4
+; CHECK-NEXT:    vmov.u16 r4, q0[5]
+; CHECK-NEXT:    vmov.32 q3[2], r4
+; CHECK-NEXT:    vand q3, q3, q1
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r4, s9
+; CHECK-NEXT:    adds.w r12, r12, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vmov r4, s10
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, lr, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vand q0, q3, q1
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    adc.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, r12, r4
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i64>
+  %s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_sext(<8 x i16> %x, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vpsel q1, q3, q2
+; CHECK-NEXT:    vmov.u16 r2, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov r4, s8
+; CHECK-NEXT:    vmov r12, s11
+; CHECK-NEXT:    vmov r5, s9
+; CHECK-NEXT:    adds.w lr, r4, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    adc.w r12, r12, r5
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    vmov r4, s10
+; CHECK-NEXT:    vmov r5, s11
+; CHECK-NEXT:    adds.w r3, r3, lr
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u16 r2, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q1[0], r5
+; CHECK-NEXT:    vmov.32 q1[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q1[2], r5
+; CHECK-NEXT:    vmov.32 q1[3], r5
+; CHECK-NEXT:    vmov.u16 r5, q0[4]
+; CHECK-NEXT:    sxth r5, r5
+; CHECK-NEXT:    vmov.32 q2[0], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q2[1], r5
+; CHECK-NEXT:    vmov.u16 r5, q0[5]
+; CHECK-NEXT:    sxth r5, r5
+; CHECK-NEXT:    vmov.32 q2[2], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q2[3], r5
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vmov r4, s4
+; CHECK-NEXT:    vmov r5, s5
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s6
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i64>
+  %s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_zext(<2 x i16> %x, <2 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v2i16_v2i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i64 q2, #0xffff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vand q0, q0, q3
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    orr.w r12, r3, r2
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    add r2, r3
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adc.w r1, r1, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = zext <2 x i16> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_sext(<2 x i16> %x, <2 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v2i16_v2i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov.i32 q2, #0xffff
+; CHECK-NEXT:    vand q2, q1, q2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = sext <2 x i16> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_zext(<16 x i8> %x, <16 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q7, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vmov q2, q0
+; CHECK-NEXT:    vpsel q6, q4, q7
+; CHECK-NEXT:    vmov.i32 q5, #0xff
+; CHECK-NEXT:    vmov.u8 r1, q6[8]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[9]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[10]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[11]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[12]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[13]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[14]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[15]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q3, q4, q7
+; CHECK-NEXT:    vmov.u16 r1, q3[4]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[12]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[13]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[14]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[15]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[0]
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[1]
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[2]
+; CHECK-NEXT:    vmov.16 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[3]
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[4]
+; CHECK-NEXT:    vmov.16 q0[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[5]
+; CHECK-NEXT:    vmov.16 q0[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[6]
+; CHECK-NEXT:    vmov.16 q0[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[7]
+; CHECK-NEXT:    vmov.16 q0[7], r1
+; CHECK-NEXT:    vldrw.u32 q6, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q4, q4, q7
+; CHECK-NEXT:    vmov.u16 r1, q4[4]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[4]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[0]
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vadd.i32 q0, q0, q6
+; CHECK-NEXT:    vmov.32 q6[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[1]
+; CHECK-NEXT:    vmov.32 q6[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[2]
+; CHECK-NEXT:    vmov.32 q6[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[3]
+; CHECK-NEXT:    vmov.32 q6[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[8]
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[9]
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[10]
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[11]
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[0]
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmov.32 q6[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[1]
+; CHECK-NEXT:    vand q3, q3, q5
+; CHECK-NEXT:    vmov.32 q6[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[2]
+; CHECK-NEXT:    vmov.32 q6[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[3]
+; CHECK-NEXT:    vmov.32 q6[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[0]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[1]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[2]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[3]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vpsel q3, q3, q1
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vand q2, q4, q5
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vadd.i32 q1, q1, q3
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i32>
+  %s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_sext(<16 x i8> %x, <16 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vpsel q5, q4, q3
+; CHECK-NEXT:    vmov.u8 r1, q5[8]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[9]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[10]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[11]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[12]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[13]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[14]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[15]
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    vmov.u16 r1, q2[4]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[5]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[6]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[7]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[0]
+; CHECK-NEXT:    vmov.16 q7[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[1]
+; CHECK-NEXT:    vmov.16 q7[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[2]
+; CHECK-NEXT:    vmov.16 q7[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[3]
+; CHECK-NEXT:    vmov.16 q7[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[4]
+; CHECK-NEXT:    vmov.16 q7[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[5]
+; CHECK-NEXT:    vmov.16 q7[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[6]
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmov.16 q7[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q5[7]
+; CHECK-NEXT:    vmovlb.s16 q6, q1
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vmov.16 q7[7], r1
+; CHECK-NEXT:    vpsel q6, q6, q1
+; CHECK-NEXT:    vcmp.i16 ne, q7, zr
+; CHECK-NEXT:    vpsel q3, q4, q3
+; CHECK-NEXT:    vmov.u16 r1, q3[4]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[5]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[6]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[7]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[0]
+; CHECK-NEXT:    vmov.32 q5[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[1]
+; CHECK-NEXT:    vmov.32 q5[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[2]
+; CHECK-NEXT:    vmov.32 q5[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[3]
+; CHECK-NEXT:    vmov.32 q5[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmovlb.s8 q4, q4
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmovlb.s16 q4, q4
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[0]
+; CHECK-NEXT:    vpsel q4, q4, q1
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmov.32 q5[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[1]
+; CHECK-NEXT:    vmov.32 q5[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[2]
+; CHECK-NEXT:    vmov.32 q5[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q3[3]
+; CHECK-NEXT:    vmov.32 q5[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmovlb.s8 q0, q3
+; CHECK-NEXT:    vpsel q2, q2, q1
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vadd.i32 q4, q4, q6
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vadd.i32 q0, q0, q2
+; CHECK-NEXT:    vadd.i32 q0, q0, q4
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i32>
+  %s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_zext(<4 x i8> %x, <4 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v4i8_v4i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = zext <4 x i8> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_sext(<4 x i8> %x, <4 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v4i8_v4i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = sext <4 x i8> %x to <4 x i32>
+  %s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_acc_zext(<16 x i8> %x, <16 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vmov.u8 r1, q1[8]
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[9]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[10]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[11]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[12]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[13]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[14]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[15]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[0]
+; CHECK-NEXT:    vmov.16 q4[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[1]
+; CHECK-NEXT:    vmov.16 q4[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[2]
+; CHECK-NEXT:    vmov.16 q4[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[3]
+; CHECK-NEXT:    vmov.16 q4[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[4]
+; CHECK-NEXT:    vmov.16 q4[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[5]
+; CHECK-NEXT:    vmov.16 q4[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[6]
+; CHECK-NEXT:    vmov.16 q4[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[7]
+; CHECK-NEXT:    vmov.16 q4[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmovlb.u8 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vmovlb.u8 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i16 q0, q0, q3
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i16>
+  %s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_acc_sext(<16 x i8> %x, <16 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vmov.u8 r1, q1[8]
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[9]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[10]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[11]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[12]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[13]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[14]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[15]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[0]
+; CHECK-NEXT:    vmov.16 q4[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[1]
+; CHECK-NEXT:    vmov.16 q4[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[2]
+; CHECK-NEXT:    vmov.16 q4[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[3]
+; CHECK-NEXT:    vmov.16 q4[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[4]
+; CHECK-NEXT:    vmov.16 q4[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[5]
+; CHECK-NEXT:    vmov.16 q4[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[6]
+; CHECK-NEXT:    vmov.16 q4[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[7]
+; CHECK-NEXT:    vmov.16 q4[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmovlb.s8 q3, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vpsel q3, q3, q2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vmovlb.s8 q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vadd.i16 q0, q0, q3
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i16>
+  %s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_acc_zext(<8 x i8> %x, <8 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v8i8_v8i16_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = zext <8 x i8> %x to <8 x i16>
+  %s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_acc_sext(<8 x i8> %x, <8 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v8i8_v8i16_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = sext <8 x i8> %x to <8 x i16>
+  %s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8_acc(<16 x i8> %x, <16 x i8> %b, i8 %a) {
+; CHECK-LABEL: add_v16i8_v16i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
+  %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+  %r = add i8 %z, %a
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v16i8_v16i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vpsel q4, q3, q2
+; CHECK-NEXT:    vmov.u8 r2, q4[0]
+; CHECK-NEXT:    vmov.16 q1[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[1]
+; CHECK-NEXT:    vmov.16 q1[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[2]
+; CHECK-NEXT:    vmov.16 q1[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[3]
+; CHECK-NEXT:    vmov.16 q1[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[4]
+; CHECK-NEXT:    vmov.16 q1[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[5]
+; CHECK-NEXT:    vmov.16 q1[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[6]
+; CHECK-NEXT:    vmov.16 q1[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[7]
+; CHECK-NEXT:    vmov.16 q1[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q1, zr
+; CHECK-NEXT:    vpsel q5, q3, q2
+; CHECK-NEXT:    vmov.u16 r2, q5[0]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[1]
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[2]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[3]
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q1, zr
+; CHECK-NEXT:    vmov.i64 q1, #0xff
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vmov.32 q6[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[0]
+; CHECK-NEXT:    vmov.32 q7[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[1]
+; CHECK-NEXT:    vmov.32 q7[2], r3
+; CHECK-NEXT:    vand q7, q7, q1
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r12, s27
+; CHECK-NEXT:    vmov r3, s25
+; CHECK-NEXT:    vmov lr, s26
+; CHECK-NEXT:    orr.w r12, r12, r3
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    add lr, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    vmov.32 q7[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    vmov.32 q7[2], r2
+; CHECK-NEXT:    vand q7, q7, q1
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov r2, s25
+; CHECK-NEXT:    adds.w r4, lr, r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    adc.w lr, r12, r2
+; CHECK-NEXT:    vmov r2, s27
+; CHECK-NEXT:    adds.w r12, r4, r3
+; CHECK-NEXT:    adc.w lr, lr, r2
+; CHECK-NEXT:    vmov.u16 r2, q5[4]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[5]
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[6]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[7]
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r4, r2, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q5[0], r4
+; CHECK-NEXT:    vmov.32 q5[1], r4
+; CHECK-NEXT:    ubfx r4, r2, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q5[2], r4
+; CHECK-NEXT:    vmov.32 q5[3], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[4]
+; CHECK-NEXT:    vmov.32 q6[0], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[5]
+; CHECK-NEXT:    vmov.32 q6[2], r4
+; CHECK-NEXT:    vand q6, q6, q1
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov r4, s21
+; CHECK-NEXT:    adds.w r12, r12, r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vmov r4, s22
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, lr, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vand q6, q6, q1
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov r2, s21
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s23
+; CHECK-NEXT:    adc.w lr, r12, r2
+; CHECK-NEXT:    vmov r2, s22
+; CHECK-NEXT:    adds.w r12, r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q4[8]
+; CHECK-NEXT:    vmov.16 q5[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[9]
+; CHECK-NEXT:    vmov.16 q5[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[10]
+; CHECK-NEXT:    vmov.16 q5[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[11]
+; CHECK-NEXT:    vmov.16 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[12]
+; CHECK-NEXT:    vmov.16 q5[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[13]
+; CHECK-NEXT:    vmov.16 q5[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[14]
+; CHECK-NEXT:    vmov.16 q5[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[15]
+; CHECK-NEXT:    vmov.16 q5[7], r2
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u16 r2, q2[0]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[1]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[2]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r4, r2, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q3[0], r4
+; CHECK-NEXT:    vmov.32 q3[1], r4
+; CHECK-NEXT:    ubfx r4, r2, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q3[2], r4
+; CHECK-NEXT:    vmov.32 q3[3], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[8]
+; CHECK-NEXT:    vmov.32 q4[0], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[9]
+; CHECK-NEXT:    vmov.32 q4[2], r4
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r4, s13
+; CHECK-NEXT:    adds.w r12, r12, r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, lr, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vand q4, q4, q1
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s15
+; CHECK-NEXT:    adc.w lr, r12, r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    adds.w r12, r3, r2
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r4, r2, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q2[0], r4
+; CHECK-NEXT:    vmov.32 q2[1], r4
+; CHECK-NEXT:    ubfx r4, r2, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q2[2], r4
+; CHECK-NEXT:    vmov.32 q2[3], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[12]
+; CHECK-NEXT:    vmov.32 q3[0], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[13]
+; CHECK-NEXT:    vmov.32 q3[2], r4
+; CHECK-NEXT:    vand q3, q3, q1
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r4, s9
+; CHECK-NEXT:    adds.w r12, r12, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vmov r4, s10
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, lr, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vand q0, q3, q1
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    adc.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, r12, r4
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i64>
+  %s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, <16 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v16i8_v16i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    vcmp.i8 eq, q1, zr
+; CHECK-NEXT:    vmov.i8 q1, #0x0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    vpsel q3, q2, q1
+; CHECK-NEXT:    vmov.u8 r2, q3[0]
+; CHECK-NEXT:    vmov.16 q4[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[1]
+; CHECK-NEXT:    vmov.16 q4[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[2]
+; CHECK-NEXT:    vmov.16 q4[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[3]
+; CHECK-NEXT:    vmov.16 q4[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[4]
+; CHECK-NEXT:    vmov.16 q4[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[5]
+; CHECK-NEXT:    vmov.16 q4[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[6]
+; CHECK-NEXT:    vmov.16 q4[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[7]
+; CHECK-NEXT:    vmov.16 q4[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q4, q2, q1
+; CHECK-NEXT:    vmov.u16 r2, q4[0]
+; CHECK-NEXT:    vmov.32 q5[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[1]
+; CHECK-NEXT:    vmov.32 q5[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[2]
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[3]
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q5[2], r3
+; CHECK-NEXT:    vmov.32 q5[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[0]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[1]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    asrs r3, r3, #31
+; CHECK-NEXT:    vmov.32 q6[3], r3
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    vmov r4, s20
+; CHECK-NEXT:    vmov r12, s23
+; CHECK-NEXT:    vmov r5, s21
+; CHECK-NEXT:    adds.w lr, r4, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    adc.w r12, r12, r5
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov r2, s21
+; CHECK-NEXT:    vmov r4, s22
+; CHECK-NEXT:    vmov r5, s23
+; CHECK-NEXT:    adds.w r3, r3, lr
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u16 r2, q4[4]
+; CHECK-NEXT:    vmov.32 q5[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[5]
+; CHECK-NEXT:    vmov.32 q5[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[6]
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q4[7]
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q4[0], r5
+; CHECK-NEXT:    vmov.32 q4[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q4[2], r5
+; CHECK-NEXT:    vmov.32 q4[3], r5
+; CHECK-NEXT:    vmov.u8 r5, q0[4]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    vmov.32 q5[0], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q5[1], r5
+; CHECK-NEXT:    vmov.u8 r5, q0[5]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    vmov.32 q5[2], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q5[3], r5
+; CHECK-NEXT:    vand q4, q5, q4
+; CHECK-NEXT:    vmov r4, s16
+; CHECK-NEXT:    vmov r5, s17
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s18
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s19
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q5[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q5[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vand q4, q5, q4
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    vmov r2, s17
+; CHECK-NEXT:    vmov r5, s19
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s18
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u8 r2, q3[8]
+; CHECK-NEXT:    vmov.16 q4[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[9]
+; CHECK-NEXT:    vmov.16 q4[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[10]
+; CHECK-NEXT:    vmov.16 q4[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[11]
+; CHECK-NEXT:    vmov.16 q4[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[12]
+; CHECK-NEXT:    vmov.16 q4[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[13]
+; CHECK-NEXT:    vmov.16 q4[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[14]
+; CHECK-NEXT:    vmov.16 q4[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[15]
+; CHECK-NEXT:    vmov.16 q4[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q4, zr
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vmov.u16 r2, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q2[0], r5
+; CHECK-NEXT:    vmov.32 q2[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q2[2], r5
+; CHECK-NEXT:    vmov.32 q2[3], r5
+; CHECK-NEXT:    vmov.u8 r5, q0[8]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    vmov.32 q3[0], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q3[1], r5
+; CHECK-NEXT:    vmov.u8 r5, q0[9]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    vmov.32 q3[2], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q3[3], r5
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r4, s8
+; CHECK-NEXT:    vmov r5, s9
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s10
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    vmov r5, s11
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s10
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u16 r2, q1[4]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[5]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[6]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q1[0], r5
+; CHECK-NEXT:    vmov.32 q1[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q1[2], r5
+; CHECK-NEXT:    vmov.32 q1[3], r5
+; CHECK-NEXT:    vmov.u8 r5, q0[12]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    vmov.32 q2[0], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q2[1], r5
+; CHECK-NEXT:    vmov.u8 r5, q0[13]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    vmov.32 q2[2], r5
+; CHECK-NEXT:    asrs r5, r5, #31
+; CHECK-NEXT:    vmov.32 q2[3], r5
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vmov r4, s4
+; CHECK-NEXT:    vmov r5, s5
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s6
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s7
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i64>
+  %s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_acc_zext(<2 x i8> %x, <2 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v2i8_v2i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i64 q2, #0xff
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vand q0, q0, q3
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    orr.w r12, r3, r2
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    add r2, r3
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adc.w r1, r1, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = zext <2 x i8> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_acc_sext(<2 x i8> %x, <2 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v2i8_v2i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov.i32 q2, #0xff
+; CHECK-NEXT:    vand q2, q1, q2
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.32 q1[1], r2
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    asrs r2, r2, #31
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = sext <2 x i8> %x to <2 x i64>
+  %s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i64_v2i64_acc(<2 x i64> %x, <2 x i64> %b, i64 %a) {
+; CHECK-LABEL: add_v2i64_v2i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i64> %b, zeroinitializer
+  %s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
+declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
new file mode 100644
index 000000000000..e75e75e30600
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
@@ -0,0 +1,5177 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc i32 @add_v4i32_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b) {
+; CHECK-LABEL: add_v4i32_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vcmp.i32 eq, q2, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %m = mul <4 x i32> %x, %y
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_zext(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b) {
+; CHECK-LABEL: add_v4i32_v4i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vcmp.i32 eq, q2, zr
+; CHECK-NEXT:    vmov.f32 s12, s4
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vmov.f32 s16, s0
+; CHECK-NEXT:    vmov.f32 s14, s5
+; CHECK-NEXT:    vmov.f32 s18, s1
+; CHECK-NEXT:    vmullb.u32 q5, q4, q3
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vand q2, q5, q2
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov r12, s11
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov.f32 s10, s7
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    vmullb.u32 q0, q1, q2
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsb.w r0, r0, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r1
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = zext <4 x i32> %x to <4 x i64>
+  %yy = zext <4 x i32> %y to <4 x i64>
+  %m = mul <4 x i64> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_sext(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b) {
+; CHECK-LABEL: add_v4i32_v4i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov.f32 s16, s4
+; CHECK-NEXT:    vcmp.i32 eq, q2, zr
+; CHECK-NEXT:    vmov.f32 s20, s0
+; CHECK-NEXT:    vmov.f32 s18, s5
+; CHECK-NEXT:    vmov.f32 s22, s1
+; CHECK-NEXT:    vmov r0, s16
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    smull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov r1, s22
+; CHECK-NEXT:    smull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov r12, s11
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov.f32 s10, s7
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    vmullb.s32 q0, q1, q2
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsb.w r0, r0, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r1
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = sext <4 x i32> %x to <4 x i64>
+  %yy = sext <4 x i32> %y to <4 x i64>
+  %m = mul <4 x i64> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_zext(<2 x i32> %x, <2 x i32> %y, <2 x i32> %b) {
+; CHECK-LABEL: add_v2i32_v2i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmullb.u32 q3, q0, q1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q3, q0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = zext <2 x i32> %x to <2 x i64>
+  %yy = zext <2 x i32> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_sext(<2 x i32> %x, <2 x i32> %y, <2 x i32> %b) {
+; CHECK-LABEL: add_v2i32_v2i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s8
+; CHECK-NEXT:    vmullb.s32 q3, q0, q1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov r0, s10
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q3, q0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = sext <2 x i32> %x to <2 x i64>
+  %yy = sext <2 x i32> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_zext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    vmov.u16 r0, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[0]
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[1]
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[2]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmullb.u16 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmullb.u16 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i32 q0, q0, q4
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i32>
+  %yy = zext <8 x i16> %y to <8 x i32>
+  %m = mul <8 x i32> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    vmov.u16 r0, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[6]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[7]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[0]
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[1]
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[2]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-NEXT:    vmullb.s16 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r0
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmullb.s16 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i32 q0, q0, q4
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i32>
+  %yy = sext <8 x i16> %y to <8 x i32>
+  %m = mul <8 x i32> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_zext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b) {
+; CHECK-LABEL: add_v4i16_v4i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.u16 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u16 q1, q2
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = zext <4 x i16> %x to <4 x i32>
+  %yy = zext <4 x i16> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_sext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b) {
+; CHECK-LABEL: add_v4i16_v4i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.s16 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u16 q1, q2
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = sext <4 x i16> %x to <4 x i32>
+  %yy = sext <4 x i16> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i16 q0, q0, q1
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %m = mul <8 x i16> %x, %y
+  %s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_zext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vpsel q3, q4, q3
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u16 r0, q3[0]
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q3[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.i64 q2, #0xffff
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vand q6, q6, q2
+; CHECK-NEXT:    vmov r2, s24
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    vmov.32 q5[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q5[2], r1
+; CHECK-NEXT:    vand q5, q5, q2
+; CHECK-NEXT:    vmov r1, s20
+; CHECK-NEXT:    umull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q7[0], r1
+; CHECK-NEXT:    vmov r1, s22
+; CHECK-NEXT:    vmov.32 q7[1], r2
+; CHECK-NEXT:    vmov r2, s26
+; CHECK-NEXT:    umull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q7[2], r1
+; CHECK-NEXT:    vmov.32 q7[3], r2
+; CHECK-NEXT:    vand q4, q7, q4
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov r12, s19
+; CHECK-NEXT:    vmov r3, s17
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adc.w r2, r3, r12
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vand q5, q5, q2
+; CHECK-NEXT:    vand q6, q6, q2
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q7[0], r0
+; CHECK-NEXT:    vmov r0, s22
+; CHECK-NEXT:    vmov.32 q7[1], r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q7[2], r0
+; CHECK-NEXT:    vmov.32 q7[3], r3
+; CHECK-NEXT:    vand q4, q7, q4
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    vmov r0, s17
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s19
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    adds.w r12, r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vmov.u16 r2, q3[4]
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[5]
+; CHECK-NEXT:    vmov.32 q4[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[6]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[7]
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmov.u16 r0, q0[5]
+; CHECK-NEXT:    vmrs lr, p0
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vand q5, q5, q2
+; CHECK-NEXT:    vmov r0, s20
+; CHECK-NEXT:    and r3, lr, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    ubfx r3, lr, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q1[4]
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q1[5]
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vand q4, q4, q2
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    umull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    vmov r0, s18
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.32 q6[3], r3
+; CHECK-NEXT:    vand q3, q6, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s15
+; CHECK-NEXT:    adds r3, r3, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    ubfx r0, lr, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    ubfx r0, lr, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vand q1, q4, q2
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vand q0, q4, q2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q3
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i64>
+  %yy = zext <8 x i16> %y to <8 x i64>
+  %m = mul <8 x i64> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b) {
+; CHECK-LABEL: add_v8i16_v8i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.u16 r0, q2[0]
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[1]
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[2]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    sxth r1, r1
+; CHECK-NEXT:    smull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q4[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[1]
+; CHECK-NEXT:    sxth r1, r1
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    smull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov r12, s15
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adc.w r2, r3, r12
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[2]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.32 q4[3], r3
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    adds.w r12, r1, r0
+; CHECK-NEXT:    vmov.u16 r0, q0[4]
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q1[4]
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adc.w r12, r1, r0
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    adds r3, r3, r1
+; CHECK-NEXT:    adc.w r1, r12, r0
+; CHECK-NEXT:    ubfx r0, r2, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    ubfx r0, r2, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[6]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i64>
+  %yy = sext <8 x i16> %y to <8 x i64>
+  %m = mul <8 x i64> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  ret i64 %z
+}
+
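+; v2i16 -> v2i64 zext: inputs are masked with 0xffff and umull'd per lane; the compare is rebuilt with cset/csetm.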
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_zext(<2 x i16> %x, <2 x i16> %y, <2 x i16> %b) {
+; CHECK-LABEL: add_v2i16_v2i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i64 q3, #0xffff
+; CHECK-NEXT:    vand q1, q1, q3
+; CHECK-NEXT:    vand q4, q0, q3
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    umull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vand q1, q2, q3
+; CHECK-NEXT:    umull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = zext <2 x i16> %x to <2 x i64>
+  %yy = zext <2 x i16> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
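+; v2i16 -> v2i64 sext: per-lane sxth and smull, with the same cset/csetm predicate expansion.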
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_sext(<2 x i16> %x, <2 x i16> %y, <2 x i16> %b) {
+; CHECK-LABEL: add_v2i16_v2i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0xffff
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vand q3, q2, q3
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    sxth r1, r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    smull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    sxth r1, r1
+; CHECK-NEXT:    smull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = sext <2 x i16> %x to <2 x i64>
+  %yy = sext <2 x i16> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
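+; v16i8 -> v16i32 zext: handled as four v4i32 chunks (vand 0xff, vmul, vpsel), accumulated and vaddv'd.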
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #48
+; CHECK-NEXT:    sub sp, #48
+; CHECK-NEXT:    vmov q3, q0
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q5, #0xff
+; CHECK-NEXT:    vpsel q7, q5, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.u8 r0, q7[8]
+; CHECK-NEXT:    vmov.i32 q6, #0xff
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[9]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[10]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[11]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[12]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[13]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[14]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[15]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q4, q5, q0
+; CHECK-NEXT:    vmov.u16 r0, q4[4]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[12]
+; CHECK-NEXT:    vand q2, q0, q6
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[13]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[14]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[15]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[0]
+; CHECK-NEXT:    vand q0, q0, q6
+; CHECK-NEXT:    vmul.i32 q0, q0, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vstrw.32 q2, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[1]
+; CHECK-NEXT:    vmov.16 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[2]
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[3]
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[4]
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[5]
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[6]
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[7]
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q5, q5, q0
+; CHECK-NEXT:    vmov.u16 r0, q5[4]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[4]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[5]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[6]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[7]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q0, q0, q6
+; CHECK-NEXT:    vand q2, q2, q6
+; CHECK-NEXT:    vmov.u16 r0, q4[0]
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vadd.i32 q0, q0, q2
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[8]
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[9]
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[10]
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[11]
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vand q2, q2, q6
+; CHECK-NEXT:    vand q4, q4, q6
+; CHECK-NEXT:    vmov.u16 r0, q5[0]
+; CHECK-NEXT:    vmul.i32 q2, q4, q2
+; CHECK-NEXT:    vpsel q4, q2, q7
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[0]
+; CHECK-NEXT:    vand q1, q2, q6
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q2, q2, q6
+; CHECK-NEXT:    vmul.i32 q1, q2, q1
+; CHECK-NEXT:    vpsel q1, q1, q7
+; CHECK-NEXT:    vadd.i32 q1, q1, q4
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    add sp, #48
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i32>
+  %yy = zext <16 x i8> %y to <16 x i32>
+  %m = mul <16 x i32> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  ret i32 %z
+}
+
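+; v16i8 -> v16i32 sext: as above, but each chunk is widened with vmovlb.s8/vmovlb.s16.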
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q4, #0x0
+; CHECK-NEXT:    vmov.i8 q5, #0xff
+; CHECK-NEXT:    vmov q3, q0
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vmov.u8 r0, q6[8]
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[9]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[10]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[11]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[12]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[13]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[14]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[15]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q2, q5, q4
+; CHECK-NEXT:    vmov.u16 r0, q2[4]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[12]
+; CHECK-NEXT:    vmov.32 q7[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[13]
+; CHECK-NEXT:    vmov.32 q7[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[14]
+; CHECK-NEXT:    vmov.32 q7[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[15]
+; CHECK-NEXT:    vmov.32 q7[3], r0
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q7, q7
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q7, q7
+; CHECK-NEXT:    vmov.u8 r0, q6[0]
+; CHECK-NEXT:    vmul.i32 q0, q7, q0
+; CHECK-NEXT:    vmov.i32 q7, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[1]
+; CHECK-NEXT:    vmov.16 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[2]
+; CHECK-NEXT:    vmov.16 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[3]
+; CHECK-NEXT:    vmov.16 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[4]
+; CHECK-NEXT:    vmov.16 q0[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[5]
+; CHECK-NEXT:    vmov.16 q0[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[6]
+; CHECK-NEXT:    vmov.16 q0[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[7]
+; CHECK-NEXT:    vmov.16 q0[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q4, q5, q4
+; CHECK-NEXT:    vmov.u16 r0, q4[4]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[4]
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[5]
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[6]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[7]
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q5, q5
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q5, q5
+; CHECK-NEXT:    vmov.u16 r0, q2[0]
+; CHECK-NEXT:    vmul.i32 q0, q5, q0
+; CHECK-NEXT:    vldrw.u32 q5, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vadd.i32 q5, q0, q5
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[1]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[2]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q2[3]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[8]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[9]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[10]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[11]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmov.u16 r0, q4[0]
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[1]
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q4[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[0]
+; CHECK-NEXT:    vmovlb.s8 q1, q2
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[1]
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmul.i32 q1, q2, q1
+; CHECK-NEXT:    vpsel q1, q1, q7
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q5
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i32>
+  %yy = sext <16 x i8> %y to <16 x i32>
+  %m = mul <16 x i32> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  ret i32 %z
+}
+
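+; v4i8 -> v4i32 zext fits in a single vector: vand 0xff, vmul, vpsel and vaddv.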
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_zext(<4 x i8> %x, <4 x i8> %y, <4 x i8> %b) {
+; CHECK-LABEL: add_v4i8_v4i32_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    vand q1, q1, q3
+; CHECK-NEXT:    vand q0, q0, q3
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vand q1, q2, q3
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = zext <4 x i8> %x to <4 x i32>
+  %yy = zext <4 x i8> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
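+; v4i8 -> v4i32 sext: vmovlb.s8/s16 widening, then the same vmul/vpsel/vaddv sequence.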
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_sext(<4 x i8> %x, <4 x i8> %y, <4 x i8> %b) {
+; CHECK-LABEL: add_v4i8_v4i32_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = sext <4 x i8> %x to <4 x i32>
+  %yy = sext <4 x i8> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  ret i32 %z
+}
+
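+; v16i8 -> v16i16 zext: split into two halves multiplied with vmullb.u8, selected against zero and vaddv'd.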
+define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i16_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u8 r0, q2[8]
+; CHECK-NEXT:    vmov.16 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[9]
+; CHECK-NEXT:    vmov.16 q3[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[10]
+; CHECK-NEXT:    vmov.16 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[11]
+; CHECK-NEXT:    vmov.16 q3[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[12]
+; CHECK-NEXT:    vmov.16 q3[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[13]
+; CHECK-NEXT:    vmov.16 q3[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[14]
+; CHECK-NEXT:    vmov.16 q3[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[15]
+; CHECK-NEXT:    vmov.16 q3[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-NEXT:    vcmp.i16 ne, q3, zr
+; CHECK-NEXT:    vmov.16 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.16 q3[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    vmov.16 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-NEXT:    vmov.16 q3[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-NEXT:    vmov.16 q3[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.16 q3[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-NEXT:    vmov.16 q3[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vmov.16 q3[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vmov.16 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.16 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.16 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.16 q4[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.16 q4[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.16 q4[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.16 q4[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.16 q4[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[0]
+; CHECK-NEXT:    vmov.16 q5[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[1]
+; CHECK-NEXT:    vmov.16 q5[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[2]
+; CHECK-NEXT:    vmov.16 q5[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[3]
+; CHECK-NEXT:    vmov.16 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[4]
+; CHECK-NEXT:    vmov.16 q5[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[5]
+; CHECK-NEXT:    vmov.16 q5[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[6]
+; CHECK-NEXT:    vmov.16 q5[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[7]
+; CHECK-NEXT:    vmov.16 q5[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[4]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmullb.u8 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vmullb.u8 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i16>
+  %yy = zext <16 x i8> %y to <16 x i16>
+  %m = mul <16 x i16> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  ret i16 %z
+}
+
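+; v16i8 -> v16i16 sext: the same two-half split using vmullb.s8.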
+define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i16_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u8 r0, q2[8]
+; CHECK-NEXT:    vmov.16 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[9]
+; CHECK-NEXT:    vmov.16 q3[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[10]
+; CHECK-NEXT:    vmov.16 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[11]
+; CHECK-NEXT:    vmov.16 q3[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[12]
+; CHECK-NEXT:    vmov.16 q3[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[13]
+; CHECK-NEXT:    vmov.16 q3[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[14]
+; CHECK-NEXT:    vmov.16 q3[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[15]
+; CHECK-NEXT:    vmov.16 q3[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[8]
+; CHECK-NEXT:    vcmp.i16 ne, q3, zr
+; CHECK-NEXT:    vmov.16 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.16 q3[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    vmov.16 q3[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-NEXT:    vmov.16 q3[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[12]
+; CHECK-NEXT:    vmov.16 q3[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.16 q3[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-NEXT:    vmov.16 q3[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vmov.16 q3[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vmov.16 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[9]
+; CHECK-NEXT:    vmov.16 q4[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[10]
+; CHECK-NEXT:    vmov.16 q4[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[11]
+; CHECK-NEXT:    vmov.16 q4[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmov.16 q4[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[13]
+; CHECK-NEXT:    vmov.16 q4[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[14]
+; CHECK-NEXT:    vmov.16 q4[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[15]
+; CHECK-NEXT:    vmov.16 q4[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[0]
+; CHECK-NEXT:    vmov.16 q5[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[1]
+; CHECK-NEXT:    vmov.16 q5[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[2]
+; CHECK-NEXT:    vmov.16 q5[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[3]
+; CHECK-NEXT:    vmov.16 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[4]
+; CHECK-NEXT:    vmov.16 q5[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[5]
+; CHECK-NEXT:    vmov.16 q5[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[6]
+; CHECK-NEXT:    vmov.16 q5[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q2[7]
+; CHECK-NEXT:    vmov.16 q5[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[0]
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[1]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[4]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q0[7]
+; CHECK-NEXT:    vmullb.s8 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r0
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vmullb.s8 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i16>
+  %yy = sext <16 x i8> %y to <16 x i16>
+  %m = mul <16 x i16> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  ret i16 %z
+}
+
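+; v8i8 -> v8i16 zext maps directly onto vmullb.u8 plus a vpsel and vaddv.u16.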
+define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_zext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b) {
+; CHECK-LABEL: add_v8i8_v8i16_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.u8 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u8 q1, q2
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = zext <8 x i8> %x to <8 x i16>
+  %yy = zext <8 x i8> %y to <8 x i16>
+  %m = mul <8 x i16> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  ret i16 %z
+}
+
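+; v8i8 -> v8i16 sext: vmullb.s8 variant of the test above.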
+define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_sext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b) {
+; CHECK-LABEL: add_v8i8_v8i16_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.s8 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u8 q1, q2
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = sext <8 x i8> %x to <8 x i16>
+  %yy = sext <8 x i8> %y to <8 x i16>
+  %m = mul <8 x i16> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  ret i16 %z
+}
+
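+; No extension: a plain vmul.i8, vpsel and vaddv.u8.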
+define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i8 q0, q0, q1
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %m = mul <16 x i8> %x, %y
+  %s = select <16 x i1> %c, <16 x i8> %m, <16 x i8> zeroinitializer
+  %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+  ret i8 %z
+}
+
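+; v16i8 -> v16i64 zext: fully scalarized, with per-pair vand/umull and 64-bit lane masks built via vmrs/ubfx.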
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #88
+; CHECK-NEXT:    sub sp, #88
+; CHECK-NEXT:    vmov q3, q1
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vstrw.32 q1, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q5, q1, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.u8 r0, q5[0]
+; CHECK-NEXT:    vmov.i64 q4, #0xff
+; CHECK-NEXT:    vmov.16 q2[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[1]
+; CHECK-NEXT:    vmov.16 q2[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[2]
+; CHECK-NEXT:    vmov.16 q2[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[3]
+; CHECK-NEXT:    vmov.16 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[4]
+; CHECK-NEXT:    vmov.16 q2[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[5]
+; CHECK-NEXT:    vmov.16 q2[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[6]
+; CHECK-NEXT:    vmov.16 q2[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q5[7]
+; CHECK-NEXT:    vmov.16 q2[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q6, q1, q0
+; CHECK-NEXT:    vmov.u16 r0, q6[0]
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q6[1]
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q6[2]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q6[3]
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.u8 r2, q1[0]
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q7[0], r1
+; CHECK-NEXT:    vmov.32 q7[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q7[2], r1
+; CHECK-NEXT:    vmov.32 q7[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[0]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[1]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vand q2, q0, q4
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[1]
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vand q1, q0, q4
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    umull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    umull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vand q0, q0, q7
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adc.w r2, r3, r12
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q7[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q7[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q2[2]
+; CHECK-NEXT:    vmov.32 q7[2], r0
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.32 q7[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[2]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[3]
+; CHECK-NEXT:    vmov.u8 r3, q2[3]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vand q0, q0, q4
+; CHECK-NEXT:    vand q1, q1, q4
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q0, q2, q7
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds.w r12, r1, r0
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vmov.u16 r2, q6[4]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[5]
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[6]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[7]
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmrs lr, p0
+; CHECK-NEXT:    vstrw.32 q3, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q4, [sp] @ 16-byte Spill
+; CHECK-NEXT:    and r3, lr, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    ubfx r3, lr, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vmov.32 q6[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[4]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[5]
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vand q0, q0, q4
+; CHECK-NEXT:    vmov.u8 r0, q3[4]
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q3[5]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vmov q7, q3
+; CHECK-NEXT:    vand q1, q1, q4
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    umull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q0, q2, q6
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    adds r3, r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q3[6]
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    ubfx r0, lr, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    vmov.32 q6[1], r0
+; CHECK-NEXT:    ubfx r0, lr, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.32 q6[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[7]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vand q0, q0, q4
+; CHECK-NEXT:    vand q1, q1, q4
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q6
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    adds.w r12, r2, r0
+; CHECK-NEXT:    vmov.u8 r2, q5[8]
+; CHECK-NEXT:    vmov.16 q6[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[9]
+; CHECK-NEXT:    vmov.16 q6[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[10]
+; CHECK-NEXT:    vmov.16 q6[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[11]
+; CHECK-NEXT:    vmov.16 q6[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[12]
+; CHECK-NEXT:    vmov.16 q6[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[13]
+; CHECK-NEXT:    vmov.16 q6[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[14]
+; CHECK-NEXT:    vmov.16 q6[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[15]
+; CHECK-NEXT:    vmov.16 q6[7], r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vmov.u8 r0, q7[8]
+; CHECK-NEXT:    vpsel q3, q1, q0
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u16 r2, q3[0]
+; CHECK-NEXT:    vmov.u8 r0, q7[9]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[1]
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[2]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[3]
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmrs lr, p0
+; CHECK-NEXT:    vldrw.u32 q6, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q5, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    and r3, lr, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    ubfx r3, lr, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vmov.32 q4[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q6[8]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q6[9]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    umull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q0, q2, q4
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    adds r3, r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q7[10]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q7[11]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    ubfx r0, lr, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.32 q4[1], r0
+; CHECK-NEXT:    ubfx r0, lr, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.32 q4[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[10]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[11]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q4
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds.w r12, r2, r0
+; CHECK-NEXT:    vmov.u16 r2, q3[4]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[5]
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[6]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[7]
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.u8 r0, q7[12]
+; CHECK-NEXT:    vmrs lr, p0
+; CHECK-NEXT:    vmov.32 q1[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q7[13]
+; CHECK-NEXT:    vmov.32 q1[2], r0
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    and r3, lr, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    ubfx r3, lr, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q6[12]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q6[13]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    umull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    umull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q0, q2, q3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    adds r3, r3, r2
+; CHECK-NEXT:    vmov.u8 r2, q7[14]
+; CHECK-NEXT:    vmov.32 q1[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q7[15]
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    ubfx r0, lr, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    ubfx r0, lr, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[14]
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q6[15]
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    umull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q2, q3
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    add sp, #88
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i64>
+  %yy = zext <16 x i8> %y to <16 x i64>
+  %m = mul <16 x i64> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  ret i64 %z
+}
+
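+; v16i8 -> v16i64 sext: the same scalarized pattern using sxtb and smull.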
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) {
+; CHECK-LABEL: add_v16i8_v16i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    vpsel q4, q3, q2
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.u8 r0, q4[0]
+; CHECK-NEXT:    vmov.16 q5[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[1]
+; CHECK-NEXT:    vmov.16 q5[1], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[2]
+; CHECK-NEXT:    vmov.16 q5[2], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[3]
+; CHECK-NEXT:    vmov.16 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[4]
+; CHECK-NEXT:    vmov.16 q5[4], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[5]
+; CHECK-NEXT:    vmov.16 q5[5], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[6]
+; CHECK-NEXT:    vmov.16 q5[6], r0
+; CHECK-NEXT:    vmov.u8 r0, q4[7]
+; CHECK-NEXT:    vmov.16 q5[7], r0
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vpsel q5, q3, q2
+; CHECK-NEXT:    vmov.u16 r0, q5[0]
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[1]
+; CHECK-NEXT:    vmov.32 q6[1], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[2]
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.u16 r0, q5[3]
+; CHECK-NEXT:    vmov.32 q6[3], r0
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmrs r0, p0
+; CHECK-NEXT:    and r1, r0, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q6[0], r1
+; CHECK-NEXT:    vmov.32 q6[1], r1
+; CHECK-NEXT:    ubfx r1, r0, #4, #1
+; CHECK-NEXT:    rsbs r1, r1, #0
+; CHECK-NEXT:    vmov.32 q6[2], r1
+; CHECK-NEXT:    vmov.32 q6[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[0]
+; CHECK-NEXT:    sxtb r1, r1
+; CHECK-NEXT:    smull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q7[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q7[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[1]
+; CHECK-NEXT:    sxtb r1, r1
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r1, r2, r2, r1
+; CHECK-NEXT:    vmov.32 q7[2], r1
+; CHECK-NEXT:    vmov.32 q7[3], r2
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r1, s26
+; CHECK-NEXT:    vmov r2, s24
+; CHECK-NEXT:    vmov r12, s27
+; CHECK-NEXT:    vmov r3, s25
+; CHECK-NEXT:    adds r1, r1, r2
+; CHECK-NEXT:    adc.w r2, r3, r12
+; CHECK-NEXT:    ubfx r3, r0, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r0, r0, #12, #1
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[2]
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q6[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[2]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q7[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-NEXT:    vmov.32 q7[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[3]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q7[2], r0
+; CHECK-NEXT:    vmov.32 q7[3], r3
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov r0, s25
+; CHECK-NEXT:    adds r1, r1, r3
+; CHECK-NEXT:    vmov r3, s27
+; CHECK-NEXT:    adcs r2, r0
+; CHECK-NEXT:    vmov r0, s26
+; CHECK-NEXT:    adds.w r12, r1, r0
+; CHECK-NEXT:    vmov.u8 r0, q0[4]
+; CHECK-NEXT:    adc.w r1, r2, r3
+; CHECK-NEXT:    vmov.u16 r2, q5[4]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[5]
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[6]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[7]
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q5[2], r3
+; CHECK-NEXT:    vmov.32 q5[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q1[4]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[5]
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[5]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.32 q6[3], r3
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov r0, s21
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adc.w r12, r1, r0
+; CHECK-NEXT:    vmov r1, s22
+; CHECK-NEXT:    vmov r0, s23
+; CHECK-NEXT:    adds r3, r3, r1
+; CHECK-NEXT:    adc.w r1, r12, r0
+; CHECK-NEXT:    ubfx r0, r2, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q5[0], r0
+; CHECK-NEXT:    vmov.32 q5[1], r0
+; CHECK-NEXT:    ubfx r0, r2, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    vmov.32 q5[2], r0
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q5[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[6]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q6[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[7]
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q6[2], r0
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vmov r0, s21
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s22
+; CHECK-NEXT:    adds.w r12, r2, r0
+; CHECK-NEXT:    vmov.u8 r2, q4[8]
+; CHECK-NEXT:    vmov.16 q5[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[9]
+; CHECK-NEXT:    vmov.16 q5[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[10]
+; CHECK-NEXT:    vmov.16 q5[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[11]
+; CHECK-NEXT:    vmov.16 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[12]
+; CHECK-NEXT:    vmov.16 q5[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[13]
+; CHECK-NEXT:    vmov.16 q5[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[14]
+; CHECK-NEXT:    vmov.16 q5[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[15]
+; CHECK-NEXT:    vmov.16 q5[7], r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vmov.u8 r0, q0[8]
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmov.u16 r2, q2[0]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[1]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[2]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q1[8]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[9]
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[9]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.32 q4[3], r3
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adc.w r12, r1, r0
+; CHECK-NEXT:    vmov r1, s14
+; CHECK-NEXT:    vmov r0, s15
+; CHECK-NEXT:    adds r3, r3, r1
+; CHECK-NEXT:    adc.w r1, r12, r0
+; CHECK-NEXT:    ubfx r0, r2, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    ubfx r0, r2, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[10]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q4[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[11]
+; CHECK-NEXT:    vmov.32 q4[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q4[2], r0
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    vmov r0, s13
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    adds.w r12, r2, r0
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmov.u8 r0, q0[12]
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q1[12]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r0, r3, r0, r3
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[13]
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[13]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r0, r3, r3, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    adds.w r3, r3, r12
+; CHECK-NEXT:    adc.w r12, r1, r0
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    adds r3, r3, r1
+; CHECK-NEXT:    adc.w r1, r12, r0
+; CHECK-NEXT:    ubfx r0, r2, #8, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    ubfx r0, r2, #12, #1
+; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[14]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov.u8 r0, q1[15]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r0, r2, r2, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    adcs r1, r0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i64>
+  %yy = sext <16 x i8> %y to <16 x i64>
+  %m = mul <16 x i64> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  ret i64 %z
+}
+
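+; <2 x i8> zext: lanes are masked with an i64 0xff splat, multiplied with umull, and the
+; predicate is built per-lane with cmp/cset/csetm before the final scalar adds/adcs.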
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_zext(<2 x i8> %x, <2 x i8> %y, <2 x i8> %b) {
+; CHECK-LABEL: add_v2i8_v2i64_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i64 q3, #0xff
+; CHECK-NEXT:    vand q1, q1, q3
+; CHECK-NEXT:    vand q4, q0, q3
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r1, s16
+; CHECK-NEXT:    umull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov r1, s18
+; CHECK-NEXT:    vand q1, q2, q3
+; CHECK-NEXT:    umull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = zext <2 x i8> %x to <2 x i64>
+  %yy = zext <2 x i8> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
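+; Sign-extended twin of the above: sxtb + smull produce the lane products.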
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_sext(<2 x i8> %x, <2 x i8> %y, <2 x i8> %b) {
+; CHECK-LABEL: add_v2i8_v2i64_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vand q3, q2, q3
+; CHECK-NEXT:    vmov r0, s12
+; CHECK-NEXT:    sxtb r1, r1
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[0], r0
+; CHECK-NEXT:    vmov.32 q2[1], r0
+; CHECK-NEXT:    vmov r0, s14
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q2[2], r0
+; CHECK-NEXT:    vmov.32 q2[3], r0
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    smull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q3[0], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    sxtb r1, r1
+; CHECK-NEXT:    smull r0, r1, r1, r0
+; CHECK-NEXT:    vmov.32 q3[2], r0
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = sext <2 x i8> %x to <2 x i64>
+  %yy = sext <2 x i8> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
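+; Full 64x64-bit multiply: each i64 lane is umull plus two mla, and the i64
+; eq-zero compare is an orrs of the low/high words fed into cset/csetm.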
+define arm_aapcs_vfpcc i64 @add_v2i64_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %b) {
+; CHECK-LABEL: add_v2i64_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s4
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    umull r12, r3, r1, r0
+; CHECK-NEXT:    mla r1, r1, r2, r3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov.32 q3[0], r12
+; CHECK-NEXT:    mla r0, r2, r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov.32 q3[1], r0
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    umull r2, r3, r1, r0
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov r2, s7
+; CHECK-NEXT:    mla r1, r1, r2, r3
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    mla r0, r2, r0, r1
+; CHECK-NEXT:    vmov r1, s8
+; CHECK-NEXT:    vmov.32 q3[3], r0
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s10
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[0], r0
+; CHECK-NEXT:    vmov.32 q0[1], r0
+; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    cset r0, eq
+; CHECK-NEXT:    tst.w r0, #1
+; CHECK-NEXT:    csetm r0, ne
+; CHECK-NEXT:    vmov.32 q0[2], r0
+; CHECK-NEXT:    vmov.32 q0[3], r0
+; CHECK-NEXT:    vand q0, q3, q0
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <2 x i64> %b, zeroinitializer
+  %m = mul <2 x i64> %x, %y
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  ret i64 %z
+}
+
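+; The same patterns again with an extra accumulator operand; the plain v4i32
+; case stays a simple vmul.i32 + vpsel + vaddva.u32.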
+define arm_aapcs_vfpcc i32 @add_v4i32_v4i32_acc(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b, i32 %a) {
+; CHECK-LABEL: add_v4i32_v4i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vcmp.i32 eq, q2, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %m = mul <4 x i32> %x, %y
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
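+; vmullb.u32 produces the i64 products per half; the v4i1 predicate is read out
+; with vmrs and expanded lane-by-lane via and/ubfx/rsbs into vector masks.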
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_zext(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v4i32_v4i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vcmp.i32 eq, q2, zr
+; CHECK-NEXT:    vmov.f32 s12, s4
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    vmov.f32 s16, s0
+; CHECK-NEXT:    vmov.f32 s14, s5
+; CHECK-NEXT:    vmov.f32 s18, s1
+; CHECK-NEXT:    vmullb.u32 q5, q4, q3
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q2, q5, q2
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov r4, s8
+; CHECK-NEXT:    vmov r12, s11
+; CHECK-NEXT:    vmov lr, s9
+; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov.f32 s10, s7
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    vmullb.u32 q0, q1, q2
+; CHECK-NEXT:    adds r5, r4, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    adc.w r4, lr, r12
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r3, r3, r5
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adcs r2, r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = zext <4 x i32> %x to <4 x i64>
+  %yy = zext <4 x i32> %y to <4 x i64>
+  %m = mul <4 x i64> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
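+; Signed variant: smull for the low lane pair, vmullb.s32 for the high pair.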
+define arm_aapcs_vfpcc i64 @add_v4i32_v4i64_acc_sext(<4 x i32> %x, <4 x i32> %y, <4 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v4i32_v4i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov.f32 s16, s4
+; CHECK-NEXT:    vcmp.i32 eq, q2, zr
+; CHECK-NEXT:    vmov.f32 s20, s0
+; CHECK-NEXT:    vmov.f32 s18, s5
+; CHECK-NEXT:    vmov.f32 s22, s1
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov r2, s18
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov r3, s22
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    vmov r4, s8
+; CHECK-NEXT:    vmov r12, s11
+; CHECK-NEXT:    vmov lr, s9
+; CHECK-NEXT:    vmov.f32 s8, s6
+; CHECK-NEXT:    vmov.f32 s10, s7
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    vmov.f32 s6, s3
+; CHECK-NEXT:    vmullb.s32 q0, q1, q2
+; CHECK-NEXT:    adds r5, r4, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q1[1], r3
+; CHECK-NEXT:    adc.w r4, lr, r12
+; CHECK-NEXT:    vmov.32 q1[2], r2
+; CHECK-NEXT:    vmov.32 q1[3], r2
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r3, r3, r5
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adcs r2, r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <4 x i32> %b, zeroinitializer
+  %xx = sext <4 x i32> %x to <4 x i64>
+  %yy = sext <4 x i32> %y to <4 x i64>
+  %m = mul <4 x i64> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
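+; <2 x i32> needs no pre-masking: vmullb.u32 widens directly and the two i64
+; lanes are summed in scalar registers before the accumulator is added.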
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_acc_zext(<2 x i32> %x, <2 x i32> %y, <2 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v2i32_v2i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmullb.u32 q3, q0, q1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vand q0, q3, q0
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = zext <2 x i32> %x to <2 x i64>
+  %yy = zext <2 x i32> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
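+; As above with vmullb.s32.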
+define arm_aapcs_vfpcc i64 @add_v2i32_v2i64_acc_sext(<2 x i32> %x, <2 x i32> %y, <2 x i32> %b, i64 %a) {
+; CHECK-LABEL: add_v2i32_v2i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov r2, s8
+; CHECK-NEXT:    vmullb.s32 q3, q0, q1
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vand q0, q3, q0
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i32> %b, zeroinitializer
+  %xx = sext <2 x i32> %x to <2 x i64>
+  %yy = sext <2 x i32> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
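+; v8i16->v8i32: the i16 predicate is materialised as a vpsel of 0x0/0xff bytes
+; and re-checked at i32 width, with each half multiplied via vmullb.u16.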
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_zext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    vmov.u16 r1, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[5]
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[6]
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[7]
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[0]
+; CHECK-NEXT:    vmov.32 q5[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[1]
+; CHECK-NEXT:    vmov.32 q5[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[2]
+; CHECK-NEXT:    vmov.32 q5[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[3]
+; CHECK-NEXT:    vmov.32 q5[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmullb.u16 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmullb.u16 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i32 q0, q0, q4
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i32>
+  %yy = zext <8 x i16> %y to <8 x i32>
+  %m = mul <8 x i32> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
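+; Signed twin using vmullb.s16.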
+define arm_aapcs_vfpcc i32 @add_v8i16_v8i32_acc_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v8i16_v8i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    vmov.u16 r1, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmov.32 q3[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[5]
+; CHECK-NEXT:    vmov.32 q3[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[6]
+; CHECK-NEXT:    vmov.32 q3[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[7]
+; CHECK-NEXT:    vmov.32 q3[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[0]
+; CHECK-NEXT:    vmov.32 q5[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[1]
+; CHECK-NEXT:    vmov.32 q5[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[2]
+; CHECK-NEXT:    vmov.32 q5[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[3]
+; CHECK-NEXT:    vmov.32 q5[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[0]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    vmov.32 q1[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.32 q1[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    vmov.32 q1[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    vmullb.s16 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.32 q1[3], r1
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i32 ne, q5, zr
+; CHECK-NEXT:    vmullb.s16 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i32 q0, q0, q4
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i32>
+  %yy = sext <8 x i16> %y to <8 x i32>
+  %m = mul <8 x i32> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
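+; v4i16 fits a single vmullb.u16; only %b needs a vmovlb.u16 before the compare.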
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_zext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v4i16_v4i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.u16 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u16 q1, q2
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = zext <4 x i16> %x to <4 x i32>
+  %yy = zext <4 x i16> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
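+; vmullb.s16 for the product; %b is still zero-extended, which is fine for an
+; eq-zero compare.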
+define arm_aapcs_vfpcc i32 @add_v4i16_v4i32_acc_sext(<4 x i16> %x, <4 x i16> %y, <4 x i16> %b, i32 %a) {
+; CHECK-LABEL: add_v4i16_v4i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.s16 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u16 q1, q2
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i16> %b, zeroinitializer
+  %xx = sext <4 x i16> %x to <4 x i32>
+  %yy = sext <4 x i16> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
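+; No extension at all: vmul.i16 + vpsel + vaddva.u16, with a final uxth for the
+; zeroext return.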
+define arm_aapcs_vfpcc zeroext i16 @add_v8i16_v8i16_acc(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i16 %a) {
+; CHECK-LABEL: add_v8i16_v8i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i16 q0, q0, q1
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %m = mul <8 x i16> %x, %y
+  %s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
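+; v8i16->v8i64 is currently fully scalarised: lane pairs are masked with an
+; 0xffff splat, umull'ed, and accumulated with adds/adc chains.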
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_zext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vpsel q3, q4, q3
+; CHECK-NEXT:    vmov.u16 r2, q3[0]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[1]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[2]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[3]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.i64 q2, #0xffff
+; CHECK-NEXT:    vmrs lr, p0
+; CHECK-NEXT:    and r3, lr, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    ubfx r3, lr, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vmov.32 q4[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q1[0]
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q1[1]
+; CHECK-NEXT:    vmov.32 q5[2], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vand q5, q5, q2
+; CHECK-NEXT:    vand q6, q6, q2
+; CHECK-NEXT:    vmov r12, s20
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    umull r3, r2, r3, r12
+; CHECK-NEXT:    vmov.32 q7[0], r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    vmov.32 q7[1], r2
+; CHECK-NEXT:    vmov r2, s22
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q7[2], r2
+; CHECK-NEXT:    vmov.32 q7[3], r3
+; CHECK-NEXT:    vand q4, q7, q4
+; CHECK-NEXT:    vmov r4, s18
+; CHECK-NEXT:    vmov r2, s16
+; CHECK-NEXT:    vmov r12, s19
+; CHECK-NEXT:    vmov r3, s17
+; CHECK-NEXT:    adds r4, r4, r2
+; CHECK-NEXT:    ubfx r2, lr, #8, #1
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    adc.w r12, r12, r3
+; CHECK-NEXT:    vmov.32 q4[1], r2
+; CHECK-NEXT:    ubfx r2, lr, #12, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[2]
+; CHECK-NEXT:    vmov.32 q5[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[3]
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vand q5, q5, q2
+; CHECK-NEXT:    vand q6, q6, q2
+; CHECK-NEXT:    vmov r2, s20
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q7[0], r2
+; CHECK-NEXT:    vmov r2, s22
+; CHECK-NEXT:    vmov.32 q7[1], r3
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q7[2], r2
+; CHECK-NEXT:    vmov.32 q7[3], r3
+; CHECK-NEXT:    vand q4, q7, q4
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    vmov r2, s17
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s19
+; CHECK-NEXT:    adc.w lr, r12, r2
+; CHECK-NEXT:    vmov r2, s18
+; CHECK-NEXT:    adds.w r12, r3, r2
+; CHECK-NEXT:    vmov.u16 r2, q3[4]
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[5]
+; CHECK-NEXT:    vmov.32 q4[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[6]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[7]
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vcmp.i32 ne, q4, zr
+; CHECK-NEXT:    vmov.u16 r3, q0[4]
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[5]
+; CHECK-NEXT:    vmov.32 q5[2], r3
+; CHECK-NEXT:    vand q5, q5, q2
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    and r4, r6, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q3[0], r4
+; CHECK-NEXT:    vmov.32 q3[1], r4
+; CHECK-NEXT:    ubfx r4, r6, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q3[2], r4
+; CHECK-NEXT:    vmov.32 q3[3], r4
+; CHECK-NEXT:    vmov.u16 r4, q1[4]
+; CHECK-NEXT:    vmov.32 q4[0], r4
+; CHECK-NEXT:    vmov.u16 r4, q1[5]
+; CHECK-NEXT:    vmov.32 q4[2], r4
+; CHECK-NEXT:    vand q4, q4, q2
+; CHECK-NEXT:    vmov r4, s16
+; CHECK-NEXT:    umull r3, r4, r3, r4
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    vmov.32 q6[1], r4
+; CHECK-NEXT:    vmov r4, s22
+; CHECK-NEXT:    umull r3, r4, r4, r3
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vmov.32 q6[3], r4
+; CHECK-NEXT:    vand q3, q6, q3
+; CHECK-NEXT:    vmov r4, s12
+; CHECK-NEXT:    vmov r3, s13
+; CHECK-NEXT:    vmov r5, s14
+; CHECK-NEXT:    vmov r2, s15
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r3, r3, lr
+; CHECK-NEXT:    adds r4, r4, r5
+; CHECK-NEXT:    adc.w r12, r3, r2
+; CHECK-NEXT:    ubfx r2, r6, #8, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    ubfx r2, r6, #12, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[6]
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vand q1, q4, q2
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vand q0, q4, q2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q0, q2, q3
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s2
+; CHECK-NEXT:    vmov r6, s3
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds r3, r3, r5
+; CHECK-NEXT:    adcs r2, r6
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = zext <8 x i16> %x to <8 x i64>
+  %yy = zext <8 x i16> %y to <8 x i64>
+  %m = mul <8 x i64> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
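+; Sign-extended version using sxth + smull per lane pair.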
+define arm_aapcs_vfpcc i64 @add_v8i16_v8i64_acc_sext(<8 x i16> %x, <8 x i16> %y, <8 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v8i16_v8i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i8 q3, #0x0
+; CHECK-NEXT:    vmov.i8 q4, #0xff
+; CHECK-NEXT:    vcmp.i16 eq, q2, zr
+; CHECK-NEXT:    vpsel q2, q4, q3
+; CHECK-NEXT:    vmov.u16 r2, q2[0]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[1]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[2]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q3[2], r3
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vmov.u16 r3, q1[0]
+; CHECK-NEXT:    sxth.w r12, r3
+; CHECK-NEXT:    vmov.u16 r3, q0[0]
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r3, r12, r3, r12
+; CHECK-NEXT:    vmov.32 q4[0], r3
+; CHECK-NEXT:    vmov.u16 r3, q1[1]
+; CHECK-NEXT:    vmov.32 q4[1], r12
+; CHECK-NEXT:    sxth.w r12, r3
+; CHECK-NEXT:    vmov.u16 r3, q0[1]
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r3, r12, r3, r12
+; CHECK-NEXT:    vmov.32 q4[2], r3
+; CHECK-NEXT:    vmov.32 q4[3], r12
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s14
+; CHECK-NEXT:    vmov r4, s12
+; CHECK-NEXT:    vmov r12, s15
+; CHECK-NEXT:    vmov r5, s13
+; CHECK-NEXT:    adds.w lr, r4, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[2]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[2]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    adc.w r12, r12, r5
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[3]
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[3]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.32 q4[3], r3
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    vmov r5, s15
+; CHECK-NEXT:    adds.w r3, r3, lr
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    vmov.u16 r4, q0[4]
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    sxth r4, r4
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q2[0], r5
+; CHECK-NEXT:    vmov.32 q2[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q2[2], r5
+; CHECK-NEXT:    vmov.32 q2[3], r5
+; CHECK-NEXT:    vmov.u16 r5, q1[4]
+; CHECK-NEXT:    sxth r5, r5
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q3[0], r5
+; CHECK-NEXT:    vmov.u16 r5, q1[5]
+; CHECK-NEXT:    vmov.32 q3[1], r4
+; CHECK-NEXT:    vmov.u16 r4, q0[5]
+; CHECK-NEXT:    sxth r5, r5
+; CHECK-NEXT:    sxth r4, r4
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q3[2], r5
+; CHECK-NEXT:    vmov.32 q3[3], r4
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r4, s8
+; CHECK-NEXT:    vmov r5, s9
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s10
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[6]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[6]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u16 r3, q0[7]
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <8 x i16> %b, zeroinitializer
+  %xx = sext <8 x i16> %x to <8 x i64>
+  %yy = sext <8 x i16> %y to <8 x i64>
+  %m = mul <8 x i64> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
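+; Two-lane i16 case: mask with 0xffff, umull, then a cmp/cset/csetm predicate
+; from each lane of the masked %b.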
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_zext(<2 x i16> %x, <2 x i16> %y, <2 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v2i16_v2i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i64 q3, #0xffff
+; CHECK-NEXT:    vand q1, q1, q3
+; CHECK-NEXT:    vand q4, q0, q3
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    vand q1, q2, q3
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.32 q0[3], r3
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = zext <2 x i16> %x to <2 x i64>
+  %yy = zext <2 x i16> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
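+; sxth + smull variant of the above.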
+define arm_aapcs_vfpcc i64 @add_v2i16_v2i64_acc_sext(<2 x i16> %x, <2 x i16> %y, <2 x i16> %b, i64 %a) {
+; CHECK-LABEL: add_v2i16_v2i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov.i32 q3, #0xffff
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vand q3, q2, q3
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    sxth r2, r2
+; CHECK-NEXT:    sxth r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i16> %b, zeroinitializer
+  %xx = sext <2 x i16> %x to <2 x i64>
+  %yy = sext <2 x i16> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
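+; v16i8->v16i32 currently splits into four v4i32 chunks, rebuilding the byte
+; predicate at each width; the register pressure forces stack spills.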
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #48
+; CHECK-NEXT:    sub sp, #48
+; CHECK-NEXT:    vmov q3, q0
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q5, #0xff
+; CHECK-NEXT:    vpsel q7, q5, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.u8 r1, q7[8]
+; CHECK-NEXT:    vmov.i32 q6, #0xff
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[9]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[10]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[11]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[12]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[13]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[14]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[15]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q4, q5, q0
+; CHECK-NEXT:    vmov.u16 r1, q4[4]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[12]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[13]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[14]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[15]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[12]
+; CHECK-NEXT:    vand q2, q0, q6
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[13]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[14]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[15]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[0]
+; CHECK-NEXT:    vand q0, q0, q6
+; CHECK-NEXT:    vmul.i32 q0, q0, q2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q2
+; CHECK-NEXT:    vstrw.32 q2, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q0, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[1]
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[2]
+; CHECK-NEXT:    vmov.16 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[3]
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[4]
+; CHECK-NEXT:    vmov.16 q0[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[5]
+; CHECK-NEXT:    vmov.16 q0[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[6]
+; CHECK-NEXT:    vmov.16 q0[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q7[7]
+; CHECK-NEXT:    vmov.16 q0[7], r1
+; CHECK-NEXT:    vldrw.u32 q7, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q5, q5, q0
+; CHECK-NEXT:    vmov.u16 r1, q5[4]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q5[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q5[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q5[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[4]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[5]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[6]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[7]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vand q0, q0, q6
+; CHECK-NEXT:    vand q2, q2, q6
+; CHECK-NEXT:    vmov.u16 r1, q4[0]
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vadd.i32 q0, q0, q2
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[8]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[9]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[10]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[11]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[8]
+; CHECK-NEXT:    vmov.32 q4[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[9]
+; CHECK-NEXT:    vmov.32 q4[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[10]
+; CHECK-NEXT:    vmov.32 q4[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[11]
+; CHECK-NEXT:    vmov.32 q4[3], r1
+; CHECK-NEXT:    vand q2, q2, q6
+; CHECK-NEXT:    vand q4, q4, q6
+; CHECK-NEXT:    vmov.u16 r1, q5[0]
+; CHECK-NEXT:    vmul.i32 q2, q4, q2
+; CHECK-NEXT:    vpsel q4, q2, q7
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q5[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q5[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q5[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[0]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[0]
+; CHECK-NEXT:    vand q1, q2, q6
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vand q2, q2, q6
+; CHECK-NEXT:    vmul.i32 q1, q2, q1
+; CHECK-NEXT:    vpsel q1, q1, q7
+; CHECK-NEXT:    vadd.i32 q1, q1, q4
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    add sp, #48
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i32>
+  %yy = zext <16 x i8> %y to <16 x i32>
+  %m = mul <16 x i32> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
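+; Signed version extends each chunk with vmovlb.s8 + vmovlb.s16 before the
+; multiply.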
+define arm_aapcs_vfpcc i32 @add_v16i8_v16i32_acc_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v16i8_v16i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q4, #0x0
+; CHECK-NEXT:    vmov.i8 q5, #0xff
+; CHECK-NEXT:    vmov q3, q0
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vmov.u8 r1, q6[8]
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[9]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[10]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[11]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[12]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[13]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[14]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[15]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q2, q5, q4
+; CHECK-NEXT:    vmov.u16 r1, q2[4]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[12]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[13]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[14]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[15]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[12]
+; CHECK-NEXT:    vmov.32 q7[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[13]
+; CHECK-NEXT:    vmov.32 q7[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[14]
+; CHECK-NEXT:    vmov.32 q7[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[15]
+; CHECK-NEXT:    vmov.32 q7[3], r1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q7, q7
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q7, q7
+; CHECK-NEXT:    vmov.u8 r1, q6[0]
+; CHECK-NEXT:    vmul.i32 q0, q7, q0
+; CHECK-NEXT:    vmov.i32 q7, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
+; CHECK-NEXT:    vmov.16 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[1]
+; CHECK-NEXT:    vmov.16 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[2]
+; CHECK-NEXT:    vmov.16 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[3]
+; CHECK-NEXT:    vmov.16 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[4]
+; CHECK-NEXT:    vmov.16 q0[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[5]
+; CHECK-NEXT:    vmov.16 q0[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[6]
+; CHECK-NEXT:    vmov.16 q0[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q6[7]
+; CHECK-NEXT:    vmov.16 q0[7], r1
+; CHECK-NEXT:    vcmp.i16 ne, q0, zr
+; CHECK-NEXT:    vpsel q4, q5, q4
+; CHECK-NEXT:    vmov.u16 r1, q4[4]
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[4]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[5]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[6]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[7]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[4]
+; CHECK-NEXT:    vmov.32 q5[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[5]
+; CHECK-NEXT:    vmov.32 q5[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[6]
+; CHECK-NEXT:    vmov.32 q5[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[7]
+; CHECK-NEXT:    vmov.32 q5[3], r1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q5, q5
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q5, q5
+; CHECK-NEXT:    vmov.u16 r1, q2[0]
+; CHECK-NEXT:    vmul.i32 q0, q5, q0
+; CHECK-NEXT:    vldrw.u32 q5, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vadd.i32 q5, q0, q5
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[1]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[2]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q2[3]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[8]
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmov.32 q0[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[9]
+; CHECK-NEXT:    vmov.32 q0[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[10]
+; CHECK-NEXT:    vmov.32 q0[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[11]
+; CHECK-NEXT:    vmov.32 q0[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[8]
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[9]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[10]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[11]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmov.u16 r1, q4[0]
+; CHECK-NEXT:    vmul.i32 q0, q2, q0
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[1]
+; CHECK-NEXT:    vpsel q0, q0, q7
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u16 r1, q4[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[0]
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[1]
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[0]
+; CHECK-NEXT:    vmovlb.s8 q1, q2
+; CHECK-NEXT:    vmov.32 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[1]
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmov.32 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[2]
+; CHECK-NEXT:    vmov.32 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q3[3]
+; CHECK-NEXT:    vmov.32 q2[3], r1
+; CHECK-NEXT:    vmovlb.s8 q2, q2
+; CHECK-NEXT:    vmovlb.s16 q2, q2
+; CHECK-NEXT:    vmul.i32 q1, q2, q1
+; CHECK-NEXT:    vpsel q1, q1, q7
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vadd.i32 q0, q0, q5
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i32>
+  %yy = sext <16 x i8> %y to <16 x i32>
+  %m = mul <16 x i32> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
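+; v4i8 stays cheap: vand with an 0xff splat on all three inputs, then
+; vmul + vpsel + vaddva.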
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_zext(<4 x i8> %x, <4 x i8> %y, <4 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v4i8_v4i32_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    vand q1, q1, q3
+; CHECK-NEXT:    vand q0, q0, q3
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vand q1, q2, q3
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = zext <4 x i8> %x to <4 x i32>
+  %yy = zext <4 x i8> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
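+; vmovlb.s8/vmovlb.s16 sign-extend the operands; %b is still zero-masked for
+; the eq-zero compare.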
+define arm_aapcs_vfpcc i32 @add_v4i8_v4i32_acc_sext(<4 x i8> %x, <4 x i8> %y, <4 x i8> %b, i32 %a) {
+; CHECK-LABEL: add_v4i8_v4i32_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vmov.i32 q1, #0xff
+; CHECK-NEXT:    vand q1, q2, q1
+; CHECK-NEXT:    vcmp.i32 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <4 x i8> %b, zeroinitializer
+  %xx = sext <4 x i8> %x to <4 x i32>
+  %yy = sext <4 x i8> %y to <4 x i32>
+  %m = mul <4 x i32> %xx, %yy
+  %s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
+  %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+  %r = add i32 %z, %a
+  ret i32 %r
+}
+
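+; The v16i1 compare is widened to a byte mask with vpsel and split into two
+; v8i16 predicates; each half is multiplied with vmullb.u8 and selected against
+; zero, and the halves are added before the final vaddva.u16.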
+define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_acc_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u8 r1, q2[8]
+; CHECK-NEXT:    vmov.16 q3[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[9]
+; CHECK-NEXT:    vmov.16 q3[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[10]
+; CHECK-NEXT:    vmov.16 q3[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[11]
+; CHECK-NEXT:    vmov.16 q3[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[12]
+; CHECK-NEXT:    vmov.16 q3[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[13]
+; CHECK-NEXT:    vmov.16 q3[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[14]
+; CHECK-NEXT:    vmov.16 q3[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[15]
+; CHECK-NEXT:    vmov.16 q3[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[8]
+; CHECK-NEXT:    vcmp.i16 ne, q3, zr
+; CHECK-NEXT:    vmov.16 q3[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[9]
+; CHECK-NEXT:    vmov.16 q3[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[10]
+; CHECK-NEXT:    vmov.16 q3[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[11]
+; CHECK-NEXT:    vmov.16 q3[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[12]
+; CHECK-NEXT:    vmov.16 q3[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[13]
+; CHECK-NEXT:    vmov.16 q3[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[14]
+; CHECK-NEXT:    vmov.16 q3[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[15]
+; CHECK-NEXT:    vmov.16 q3[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vmov.16 q4[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.16 q4[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.16 q4[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.16 q4[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.16 q4[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.16 q4[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.16 q4[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.16 q4[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[0]
+; CHECK-NEXT:    vmov.16 q5[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[1]
+; CHECK-NEXT:    vmov.16 q5[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[2]
+; CHECK-NEXT:    vmov.16 q5[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[3]
+; CHECK-NEXT:    vmov.16 q5[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[4]
+; CHECK-NEXT:    vmov.16 q5[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[5]
+; CHECK-NEXT:    vmov.16 q5[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[6]
+; CHECK-NEXT:    vmov.16 q5[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[7]
+; CHECK-NEXT:    vmov.16 q5[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[0]
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[1]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[2]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[3]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[4]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[5]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[6]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[7]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmullb.u8 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vmullb.u8 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i16>
+  %yy = zext <16 x i8> %y to <16 x i16>
+  %m = mul <16 x i16> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
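+; Signed version of the test above, using vmullb.s8 for the widening
+; multiplies and sxth on the final scalar result.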
+define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_acc_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v16i8_v16i16_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u8 r1, q2[8]
+; CHECK-NEXT:    vmov.16 q3[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[9]
+; CHECK-NEXT:    vmov.16 q3[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[10]
+; CHECK-NEXT:    vmov.16 q3[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[11]
+; CHECK-NEXT:    vmov.16 q3[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[12]
+; CHECK-NEXT:    vmov.16 q3[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[13]
+; CHECK-NEXT:    vmov.16 q3[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[14]
+; CHECK-NEXT:    vmov.16 q3[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[15]
+; CHECK-NEXT:    vmov.16 q3[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[8]
+; CHECK-NEXT:    vcmp.i16 ne, q3, zr
+; CHECK-NEXT:    vmov.16 q3[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[9]
+; CHECK-NEXT:    vmov.16 q3[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[10]
+; CHECK-NEXT:    vmov.16 q3[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[11]
+; CHECK-NEXT:    vmov.16 q3[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[12]
+; CHECK-NEXT:    vmov.16 q3[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[13]
+; CHECK-NEXT:    vmov.16 q3[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[14]
+; CHECK-NEXT:    vmov.16 q3[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[15]
+; CHECK-NEXT:    vmov.16 q3[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    vmov.16 q4[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    vmov.16 q4[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    vmov.16 q4[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    vmov.16 q4[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    vmov.16 q4[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    vmov.16 q4[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    vmov.16 q4[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    vmov.16 q4[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[0]
+; CHECK-NEXT:    vmov.16 q5[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[1]
+; CHECK-NEXT:    vmov.16 q5[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[2]
+; CHECK-NEXT:    vmov.16 q5[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[3]
+; CHECK-NEXT:    vmov.16 q5[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[4]
+; CHECK-NEXT:    vmov.16 q5[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[5]
+; CHECK-NEXT:    vmov.16 q5[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[6]
+; CHECK-NEXT:    vmov.16 q5[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q2[7]
+; CHECK-NEXT:    vmov.16 q5[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[0]
+; CHECK-NEXT:    vmov.16 q2[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[1]
+; CHECK-NEXT:    vmov.16 q2[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[2]
+; CHECK-NEXT:    vmov.16 q2[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[3]
+; CHECK-NEXT:    vmov.16 q2[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[4]
+; CHECK-NEXT:    vmov.16 q2[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[5]
+; CHECK-NEXT:    vmov.16 q2[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[6]
+; CHECK-NEXT:    vmov.16 q2[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q1[7]
+; CHECK-NEXT:    vmov.16 q2[7], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    vmov.16 q1[0], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.16 q1[1], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    vmov.16 q1[2], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    vmov.16 q1[3], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    vmov.16 q1[4], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    vmov.16 q1[5], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    vmov.16 q1[6], r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    vmullb.s8 q4, q4, q3
+; CHECK-NEXT:    vmov.i32 q3, #0x0
+; CHECK-NEXT:    vmov.16 q1[7], r1
+; CHECK-NEXT:    vpsel q4, q4, q3
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vmullb.s8 q0, q1, q2
+; CHECK-NEXT:    vpsel q0, q0, q3
+; CHECK-NEXT:    vadd.i16 q0, q0, q4
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i16>
+  %yy = sext <16 x i8> %y to <16 x i16>
+  %m = mul <16 x i16> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
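+; A single v8i16 predicate suffices here: vmullb.u8 performs the zero-extend
+; and multiply in one step, and the compare operand is widened with vmovlb.u8.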
+define arm_aapcs_vfpcc zeroext i16 @add_v8i8_v8i16_acc_zext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v8i8_v8i16_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.u8 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u8 q1, q2
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = zext <8 x i8> %x to <8 x i16>
+  %yy = zext <8 x i8> %y to <8 x i16>
+  %m = mul <8 x i16> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
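+; Same pattern with a signed vmullb.s8 multiply; the compare operand is still
+; zero-extended with vmovlb.u8, since only equality with zero is tested.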
+define arm_aapcs_vfpcc signext i16 @add_v8i8_v8i16_acc_sext(<8 x i8> %x, <8 x i8> %y, <8 x i8> %b, i16 %a) {
+; CHECK-LABEL: add_v8i8_v8i16_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmullb.s8 q0, q0, q1
+; CHECK-NEXT:    vmovlb.u8 q1, q2
+; CHECK-NEXT:    vcmp.i16 eq, q1, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <8 x i8> %b, zeroinitializer
+  %xx = sext <8 x i8> %x to <8 x i16>
+  %yy = sext <8 x i8> %y to <8 x i16>
+  %m = mul <8 x i16> %xx, %yy
+  %s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
+  %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+  %r = add i16 %z, %a
+  ret i16 %r
+}
+
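+; No extension at all: a plain vmul.i8, vpsel and vaddva.u8, with uxtb on the
+; returned i8 accumulator.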
+define arm_aapcs_vfpcc zeroext i8 @add_v16i8_v16i8_acc(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i8 %a) {
+; CHECK-LABEL: add_v16i8_v16i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i8 q0, q0, q1
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    vaddva.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %m = mul <16 x i8> %x, %y
+  %s = select <16 x i1> %c, <16 x i8> %m, <16 x i8> zeroinitializer
+  %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+  %r = add i8 %z, %a
+  ret i8 %r
+}
+
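+; Widening v16i8 to v16i64 gives no vector multiply: the predicate bits are
+; expanded four at a time with vmrs/ubfx/rsbs, lane pairs are multiplied with
+; umull and the partial sums are chained through adds/adcs.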
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v16i8_v16i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #80
+; CHECK-NEXT:    sub sp, #80
+; CHECK-NEXT:    vmov q3, q1
+; CHECK-NEXT:    vstrw.32 q0, [sp, #64] @ 16-byte Spill
+; CHECK-NEXT:    vmov.i8 q0, #0x0
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vstrw.32 q1, [sp, #16] @ 16-byte Spill
+; CHECK-NEXT:    vpsel q5, q1, q0
+; CHECK-NEXT:    vstrw.32 q0, [sp, #32] @ 16-byte Spill
+; CHECK-NEXT:    vmov.u8 r2, q5[0]
+; CHECK-NEXT:    vmov.i64 q4, #0xff
+; CHECK-NEXT:    vmov.16 q2[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[1]
+; CHECK-NEXT:    vmov.16 q2[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[2]
+; CHECK-NEXT:    vmov.16 q2[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[3]
+; CHECK-NEXT:    vmov.16 q2[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[4]
+; CHECK-NEXT:    vmov.16 q2[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[5]
+; CHECK-NEXT:    vmov.16 q2[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[6]
+; CHECK-NEXT:    vmov.16 q2[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[7]
+; CHECK-NEXT:    vmov.16 q2[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q2, zr
+; CHECK-NEXT:    vpsel q6, q1, q0
+; CHECK-NEXT:    vmov.u16 r2, q6[0]
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[1]
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[2]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[3]
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q2, zr
+; CHECK-NEXT:    vmrs lr, p0
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    and r3, lr, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q7[0], r3
+; CHECK-NEXT:    vmov.32 q7[1], r3
+; CHECK-NEXT:    ubfx r3, lr, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q7[2], r3
+; CHECK-NEXT:    vmov.32 q7[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[0]
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[1]
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vmov.u8 r3, q1[0]
+; CHECK-NEXT:    vand q2, q0, q4
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q1[1]
+; CHECK-NEXT:    vmov r12, s8
+; CHECK-NEXT:    vmov.32 q0[2], r3
+; CHECK-NEXT:    vand q1, q0, q4
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    umull r3, r2, r3, r12
+; CHECK-NEXT:    vmov.32 q0[0], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov r2, s10
+; CHECK-NEXT:    vldrw.u32 q2, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q0[3], r3
+; CHECK-NEXT:    vand q0, q0, q7
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    adds r4, r4, r2
+; CHECK-NEXT:    ubfx r2, lr, #8, #1
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q7[0], r2
+; CHECK-NEXT:    adc.w r12, r12, r3
+; CHECK-NEXT:    vmov.32 q7[1], r2
+; CHECK-NEXT:    ubfx r2, lr, #12, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.u8 r3, q2[2]
+; CHECK-NEXT:    vmov.32 q7[2], r2
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.32 q7[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[2]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q3[3]
+; CHECK-NEXT:    vmov.u8 r3, q2[3]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vand q0, q0, q4
+; CHECK-NEXT:    vand q1, q1, q4
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q0, q2, q7
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    adc.w lr, r12, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    adds.w r12, r3, r2
+; CHECK-NEXT:    vmov.u16 r2, q6[4]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[5]
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[6]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q6[7]
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    adc.w lr, lr, r4
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmrs r6, p0
+; CHECK-NEXT:    vstrw.32 q3, [sp, #48] @ 16-byte Spill
+; CHECK-NEXT:    vstrw.32 q4, [sp] @ 16-byte Spill
+; CHECK-NEXT:    and r4, r6, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q6[0], r4
+; CHECK-NEXT:    vmov.32 q6[1], r4
+; CHECK-NEXT:    ubfx r4, r6, #4, #1
+; CHECK-NEXT:    rsbs r4, r4, #0
+; CHECK-NEXT:    vmov.32 q6[2], r4
+; CHECK-NEXT:    vmov.32 q6[3], r4
+; CHECK-NEXT:    vmov.u8 r4, q3[4]
+; CHECK-NEXT:    vmov.32 q0[0], r4
+; CHECK-NEXT:    vmov.u8 r4, q3[5]
+; CHECK-NEXT:    vldrw.u32 q3, [sp, #64] @ 16-byte Reload
+; CHECK-NEXT:    vmov.32 q0[2], r4
+; CHECK-NEXT:    vand q0, q0, q4
+; CHECK-NEXT:    vmov.u8 r3, q3[4]
+; CHECK-NEXT:    vmov r4, s0
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[5]
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vmov q7, q3
+; CHECK-NEXT:    vand q1, q1, q4
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    umull r3, r4, r3, r4
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.32 q2[1], r4
+; CHECK-NEXT:    vmov r4, s6
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    umull r3, r4, r4, r3
+; CHECK-NEXT:    vmov.32 q2[2], r3
+; CHECK-NEXT:    vmov.32 q2[3], r4
+; CHECK-NEXT:    vand q0, q2, q6
+; CHECK-NEXT:    vmov r4, s0
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vmov r5, s2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r3, r3, lr
+; CHECK-NEXT:    adds r4, r4, r5
+; CHECK-NEXT:    adc.w r12, r3, r2
+; CHECK-NEXT:    ubfx r2, r6, #8, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.u8 r3, q3[6]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    ubfx r2, r6, #12, #1
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[6]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q1[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q3[7]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q1[2], r3
+; CHECK-NEXT:    vand q0, q0, q4
+; CHECK-NEXT:    vand q1, q1, q4
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r3, s4
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov r3, s6
+; CHECK-NEXT:    vldrw.u32 q1, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r3
+; CHECK-NEXT:    vand q0, q2, q6
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s2
+; CHECK-NEXT:    vmov r6, s3
+; CHECK-NEXT:    vldrw.u32 q0, [sp, #32] @ 16-byte Reload
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r5
+; CHECK-NEXT:    vmov.u8 r5, q7[8]
+; CHECK-NEXT:    adc.w r3, r2, r6
+; CHECK-NEXT:    vmov.u8 r2, q5[8]
+; CHECK-NEXT:    vmov.16 q6[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[9]
+; CHECK-NEXT:    vmov.16 q6[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[10]
+; CHECK-NEXT:    vmov.16 q6[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[11]
+; CHECK-NEXT:    vmov.16 q6[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[12]
+; CHECK-NEXT:    vmov.16 q6[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[13]
+; CHECK-NEXT:    vmov.16 q6[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[14]
+; CHECK-NEXT:    vmov.16 q6[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q5[15]
+; CHECK-NEXT:    vmov.16 q6[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q6, zr
+; CHECK-NEXT:    vpsel q3, q1, q0
+; CHECK-NEXT:    vmov.32 q1[0], r5
+; CHECK-NEXT:    vmov.u16 r2, q3[0]
+; CHECK-NEXT:    vmov.u8 r5, q7[9]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[1]
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[2]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[3]
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vmov.32 q1[2], r5
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    vldrw.u32 q6, [sp, #48] @ 16-byte Reload
+; CHECK-NEXT:    vldrw.u32 q5, [sp] @ 16-byte Reload
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r5, s4
+; CHECK-NEXT:    and r6, r2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    vmov.32 q4[0], r6
+; CHECK-NEXT:    vmov.32 q4[1], r6
+; CHECK-NEXT:    ubfx r6, r2, #4, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    vmov.32 q4[2], r6
+; CHECK-NEXT:    vmov.32 q4[3], r6
+; CHECK-NEXT:    vmov.u8 r6, q6[8]
+; CHECK-NEXT:    vmov.32 q0[0], r6
+; CHECK-NEXT:    vmov.u8 r6, q6[9]
+; CHECK-NEXT:    vmov.32 q0[2], r6
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vmov r6, s0
+; CHECK-NEXT:    umull r6, r5, r5, r6
+; CHECK-NEXT:    vmov.32 q2[0], r6
+; CHECK-NEXT:    vmov r6, s2
+; CHECK-NEXT:    vmov.32 q2[1], r5
+; CHECK-NEXT:    vmov r5, s6
+; CHECK-NEXT:    umull r6, r5, r5, r6
+; CHECK-NEXT:    vmov.32 q2[2], r6
+; CHECK-NEXT:    vmov.32 q2[3], r5
+; CHECK-NEXT:    vand q0, q2, q4
+; CHECK-NEXT:    vmov r5, s0
+; CHECK-NEXT:    vmov r6, s1
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    adds.w r5, r5, r12
+; CHECK-NEXT:    adcs r6, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    adds r3, r3, r5
+; CHECK-NEXT:    adc.w r12, r6, r4
+; CHECK-NEXT:    ubfx r6, r2, #8, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q4[0], r6
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q4[1], r6
+; CHECK-NEXT:    vmov.u8 r6, q7[10]
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.32 q1[0], r6
+; CHECK-NEXT:    vmov.32 q4[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q6[10]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q6[11]
+; CHECK-NEXT:    vmov.u8 r6, q7[11]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q1[2], r6
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r6, s4
+; CHECK-NEXT:    umull r2, r6, r6, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.32 q2[1], r6
+; CHECK-NEXT:    vmov r6, s6
+; CHECK-NEXT:    umull r2, r6, r6, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r6
+; CHECK-NEXT:    vand q0, q2, q4
+; CHECK-NEXT:    vmov r6, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s2
+; CHECK-NEXT:    adds r3, r3, r6
+; CHECK-NEXT:    vmov r6, s3
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r5
+; CHECK-NEXT:    vmov.u8 r5, q7[12]
+; CHECK-NEXT:    vmov.32 q1[0], r5
+; CHECK-NEXT:    vmov.u8 r5, q7[13]
+; CHECK-NEXT:    vmov.32 q1[2], r5
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r5, s4
+; CHECK-NEXT:    adc.w r3, r2, r6
+; CHECK-NEXT:    vmov.u16 r2, q3[4]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[5]
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[6]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q3[7]
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q0, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r6, r2, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    vmov.32 q3[0], r6
+; CHECK-NEXT:    vmov.32 q3[1], r6
+; CHECK-NEXT:    ubfx r6, r2, #4, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    vmov.32 q3[2], r6
+; CHECK-NEXT:    vmov.32 q3[3], r6
+; CHECK-NEXT:    vmov.u8 r6, q6[12]
+; CHECK-NEXT:    vmov.32 q0[0], r6
+; CHECK-NEXT:    vmov.u8 r6, q6[13]
+; CHECK-NEXT:    vmov.32 q0[2], r6
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vmov r6, s0
+; CHECK-NEXT:    umull r6, r5, r5, r6
+; CHECK-NEXT:    vmov.32 q2[0], r6
+; CHECK-NEXT:    vmov r6, s2
+; CHECK-NEXT:    vmov.32 q2[1], r5
+; CHECK-NEXT:    vmov r5, s6
+; CHECK-NEXT:    umull r6, r5, r5, r6
+; CHECK-NEXT:    vmov.32 q2[2], r6
+; CHECK-NEXT:    vmov.32 q2[3], r5
+; CHECK-NEXT:    vand q0, q2, q3
+; CHECK-NEXT:    vmov r5, s0
+; CHECK-NEXT:    vmov r6, s1
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    adds.w r5, r5, r12
+; CHECK-NEXT:    adcs r6, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    adds r3, r3, r5
+; CHECK-NEXT:    adc.w r12, r6, r4
+; CHECK-NEXT:    ubfx r6, r2, #8, #1
+; CHECK-NEXT:    rsbs r6, r6, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r6
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q3[1], r6
+; CHECK-NEXT:    vmov.u8 r6, q7[14]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q1[0], r6
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q6[14]
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q6[15]
+; CHECK-NEXT:    vmov.u8 r6, q7[15]
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q1[2], r6
+; CHECK-NEXT:    vand q0, q0, q5
+; CHECK-NEXT:    vand q1, q1, q5
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r6, s4
+; CHECK-NEXT:    umull r2, r6, r6, r2
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov.32 q2[1], r6
+; CHECK-NEXT:    vmov r6, s6
+; CHECK-NEXT:    umull r2, r6, r6, r2
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r6
+; CHECK-NEXT:    vand q0, q2, q3
+; CHECK-NEXT:    vmov r6, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s2
+; CHECK-NEXT:    adds r3, r3, r6
+; CHECK-NEXT:    vmov r6, s3
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds r3, r3, r5
+; CHECK-NEXT:    adcs r2, r6
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    add sp, #80
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = zext <16 x i8> %x to <16 x i64>
+  %yy = zext <16 x i8> %y to <16 x i64>
+  %m = mul <16 x i64> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
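+; The signed equivalent, using sxtb and smull for each lane pair instead of
+; the vand/umull sequence of the zext test.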
+define arm_aapcs_vfpcc i64 @add_v16i8_v16i64_acc_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v16i8_v16i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vcmp.i8 eq, q2, zr
+; CHECK-NEXT:    vmov.i8 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vpsel q4, q3, q2
+; CHECK-NEXT:    vmov.u8 r2, q4[0]
+; CHECK-NEXT:    vmov.16 q5[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[1]
+; CHECK-NEXT:    vmov.16 q5[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[2]
+; CHECK-NEXT:    vmov.16 q5[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[3]
+; CHECK-NEXT:    vmov.16 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[4]
+; CHECK-NEXT:    vmov.16 q5[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[5]
+; CHECK-NEXT:    vmov.16 q5[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[6]
+; CHECK-NEXT:    vmov.16 q5[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[7]
+; CHECK-NEXT:    vmov.16 q5[7], r2
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vpsel q5, q3, q2
+; CHECK-NEXT:    vmov.u16 r2, q5[0]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[1]
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[2]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[3]
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r3, r2, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    ubfx r3, r2, #4, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    vmov.32 q6[2], r3
+; CHECK-NEXT:    vmov.32 q6[3], r3
+; CHECK-NEXT:    vmov.u8 r3, q1[0]
+; CHECK-NEXT:    sxtb.w r12, r3
+; CHECK-NEXT:    vmov.u8 r3, q0[0]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r3, r12, r3, r12
+; CHECK-NEXT:    vmov.32 q7[0], r3
+; CHECK-NEXT:    vmov.u8 r3, q1[1]
+; CHECK-NEXT:    vmov.32 q7[1], r12
+; CHECK-NEXT:    sxtb.w r12, r3
+; CHECK-NEXT:    vmov.u8 r3, q0[1]
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r3, r12, r3, r12
+; CHECK-NEXT:    vmov.32 q7[2], r3
+; CHECK-NEXT:    vmov.32 q7[3], r12
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r3, s26
+; CHECK-NEXT:    vmov r4, s24
+; CHECK-NEXT:    vmov r12, s27
+; CHECK-NEXT:    vmov r5, s25
+; CHECK-NEXT:    adds.w lr, r4, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsb.w r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q6[0], r3
+; CHECK-NEXT:    rsb.w r2, r2, #0
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[2]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[2]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    adc.w r12, r12, r5
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q7[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[3]
+; CHECK-NEXT:    vmov.32 q7[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[3]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q7[2], r2
+; CHECK-NEXT:    vmov.32 q7[3], r3
+; CHECK-NEXT:    vand q6, q7, q6
+; CHECK-NEXT:    vmov r3, s24
+; CHECK-NEXT:    vmov r2, s25
+; CHECK-NEXT:    vmov r4, s26
+; CHECK-NEXT:    vmov r5, s27
+; CHECK-NEXT:    adds.w r3, r3, lr
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    vmov.u8 r4, q0[4]
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u16 r2, q5[4]
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[5]
+; CHECK-NEXT:    vmov.32 q6[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[6]
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q5[7]
+; CHECK-NEXT:    vmov.32 q6[3], r2
+; CHECK-NEXT:    sxtb r4, r4
+; CHECK-NEXT:    vcmp.i32 ne, q6, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q5[0], r5
+; CHECK-NEXT:    vmov.32 q5[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q5[2], r5
+; CHECK-NEXT:    vmov.32 q5[3], r5
+; CHECK-NEXT:    vmov.u8 r5, q1[4]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q6[0], r5
+; CHECK-NEXT:    vmov.u8 r5, q1[5]
+; CHECK-NEXT:    vmov.32 q6[1], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[5]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    sxtb r4, r4
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q6[2], r5
+; CHECK-NEXT:    vmov.32 q6[3], r4
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r4, s20
+; CHECK-NEXT:    vmov r5, s21
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s22
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s23
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q5[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q5[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[6]
+; CHECK-NEXT:    vmov.32 q5[2], r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[6]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q6[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[7]
+; CHECK-NEXT:    vmov.32 q6[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[7]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q6[2], r2
+; CHECK-NEXT:    vmov.32 q6[3], r3
+; CHECK-NEXT:    vand q5, q6, q5
+; CHECK-NEXT:    vmov r3, s20
+; CHECK-NEXT:    vmov r2, s21
+; CHECK-NEXT:    vmov r5, s23
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s22
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    vmov.u8 r4, q0[8]
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u8 r2, q4[8]
+; CHECK-NEXT:    vmov.16 q5[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[9]
+; CHECK-NEXT:    vmov.16 q5[1], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[10]
+; CHECK-NEXT:    vmov.16 q5[2], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[11]
+; CHECK-NEXT:    vmov.16 q5[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[12]
+; CHECK-NEXT:    vmov.16 q5[4], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[13]
+; CHECK-NEXT:    vmov.16 q5[5], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[14]
+; CHECK-NEXT:    vmov.16 q5[6], r2
+; CHECK-NEXT:    vmov.u8 r2, q4[15]
+; CHECK-NEXT:    vmov.16 q5[7], r2
+; CHECK-NEXT:    sxtb r4, r4
+; CHECK-NEXT:    vcmp.i16 ne, q5, zr
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.u16 r2, q2[0]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[1]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[2]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[3]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q3[0], r5
+; CHECK-NEXT:    vmov.32 q3[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q3[2], r5
+; CHECK-NEXT:    vmov.32 q3[3], r5
+; CHECK-NEXT:    vmov.u8 r5, q1[8]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q4[0], r5
+; CHECK-NEXT:    vmov.u8 r5, q1[9]
+; CHECK-NEXT:    vmov.32 q4[1], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[9]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    sxtb r4, r4
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q4[2], r5
+; CHECK-NEXT:    vmov.32 q4[3], r4
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r4, s12
+; CHECK-NEXT:    vmov r5, s13
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s15
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q3[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[10]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[10]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q4[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[11]
+; CHECK-NEXT:    vmov.32 q4[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[11]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q4[2], r2
+; CHECK-NEXT:    vmov.32 q4[3], r3
+; CHECK-NEXT:    vand q3, q4, q3
+; CHECK-NEXT:    vmov r3, s12
+; CHECK-NEXT:    vmov r2, s13
+; CHECK-NEXT:    vmov r5, s15
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s14
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds.w r12, r3, r4
+; CHECK-NEXT:    vmov.u8 r4, q0[12]
+; CHECK-NEXT:    adc.w r3, r2, r5
+; CHECK-NEXT:    vmov.u16 r2, q2[4]
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[5]
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[6]
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.u16 r2, q2[7]
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    sxtb r4, r4
+; CHECK-NEXT:    vcmp.i32 ne, q3, zr
+; CHECK-NEXT:    vmrs r2, p0
+; CHECK-NEXT:    and r5, r2, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q2[0], r5
+; CHECK-NEXT:    vmov.32 q2[1], r5
+; CHECK-NEXT:    ubfx r5, r2, #4, #1
+; CHECK-NEXT:    rsbs r5, r5, #0
+; CHECK-NEXT:    vmov.32 q2[2], r5
+; CHECK-NEXT:    vmov.32 q2[3], r5
+; CHECK-NEXT:    vmov.u8 r5, q1[12]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q3[0], r5
+; CHECK-NEXT:    vmov.u8 r5, q1[13]
+; CHECK-NEXT:    vmov.32 q3[1], r4
+; CHECK-NEXT:    vmov.u8 r4, q0[13]
+; CHECK-NEXT:    sxtb r5, r5
+; CHECK-NEXT:    sxtb r4, r4
+; CHECK-NEXT:    smull r5, r4, r4, r5
+; CHECK-NEXT:    vmov.32 q3[2], r5
+; CHECK-NEXT:    vmov.32 q3[3], r4
+; CHECK-NEXT:    vand q2, q3, q2
+; CHECK-NEXT:    vmov r4, s8
+; CHECK-NEXT:    vmov r5, s9
+; CHECK-NEXT:    adds.w r12, r12, r4
+; CHECK-NEXT:    vmov r4, s10
+; CHECK-NEXT:    adcs r5, r3
+; CHECK-NEXT:    vmov r3, s11
+; CHECK-NEXT:    adds.w r4, r4, r12
+; CHECK-NEXT:    adc.w r12, r5, r3
+; CHECK-NEXT:    ubfx r3, r2, #8, #1
+; CHECK-NEXT:    rsbs r3, r3, #0
+; CHECK-NEXT:    ubfx r2, r2, #12, #1
+; CHECK-NEXT:    vmov.32 q2[0], r3
+; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    vmov.32 q2[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[14]
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[14]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov.u8 r2, q1[15]
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov.u8 r3, q0[15]
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    adc.w r2, r2, r12
+; CHECK-NEXT:    adds r3, r3, r4
+; CHECK-NEXT:    adcs r2, r5
+; CHECK-NEXT:    adds r0, r0, r3
+; CHECK-NEXT:    adcs r1, r2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+entry:
+  %c = icmp eq <16 x i8> %b, zeroinitializer
+  %xx = sext <16 x i8> %x to <16 x i64>
+  %yy = sext <16 x i8> %y to <16 x i64>
+  %m = mul <16 x i64> %xx, %yy
+  %s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
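+; Two i8 lanes widened to i64: umull forms each 64-bit product and the
+; predicate is built with scalar cmp/cset/csetm rather than a VPT block.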
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_acc_zext(<2 x i8> %x, <2 x i8> %y, <2 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v2i8_v2i64_acc_zext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9}
+; CHECK-NEXT:    vpush {d8, d9}
+; CHECK-NEXT:    vmov.i64 q3, #0xff
+; CHECK-NEXT:    vand q1, q1, q3
+; CHECK-NEXT:    vand q4, q0, q3
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov r3, s16
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.32 q0[1], r3
+; CHECK-NEXT:    vmov r3, s18
+; CHECK-NEXT:    vand q1, q2, q3
+; CHECK-NEXT:    umull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov.32 q0[3], r3
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    vpop {d8, d9}
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = zext <2 x i8> %x to <2 x i64>
+  %yy = zext <2 x i8> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
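+; Signed variant: the lanes are sign-extended with sxtb and multiplied with
+; smull before the same scalar cset/csetm mask is applied.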
+define arm_aapcs_vfpcc i64 @add_v2i8_v2i64_acc_sext(<2 x i8> %x, <2 x i8> %y, <2 x i8> %b, i64 %a) {
+; CHECK-LABEL: add_v2i8_v2i64_acc_sext:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    vmov.i32 q3, #0xff
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vand q3, q2, q3
+; CHECK-NEXT:    vmov r2, s12
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[0], r2
+; CHECK-NEXT:    vmov.32 q2[1], r2
+; CHECK-NEXT:    vmov r2, s14
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q2[2], r2
+; CHECK-NEXT:    vmov.32 q2[3], r2
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[0], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    vmov.32 q3[1], r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    sxtb r2, r2
+; CHECK-NEXT:    sxtb r3, r3
+; CHECK-NEXT:    smull r2, r3, r3, r2
+; CHECK-NEXT:    vmov.32 q3[2], r2
+; CHECK-NEXT:    vmov.32 q3[3], r3
+; CHECK-NEXT:    vand q0, q3, q2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    adds r2, r2, r3
+; CHECK-NEXT:    adc.w r3, lr, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %c = icmp eq <2 x i8> %b, zeroinitializer
+  %xx = sext <2 x i8> %x to <2 x i64>
+  %yy = sext <2 x i8> %y to <2 x i64>
+  %m = mul <2 x i64> %xx, %yy
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
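+; MVE has no v2i64 multiply, so each 64-bit product is built from umull plus
+; two mla instructions, and the zero-compare of the i64 lanes combines the two
+; halves with orrs before the cset/csetm mask.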
+define arm_aapcs_vfpcc i64 @add_v2i64_v2i64_acc(<2 x i64> %x, <2 x i64> %y, <2 x i64> %b, i64 %a) {
+; CHECK-LABEL: add_v2i64_v2i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r4, s5
+; CHECK-NEXT:    umull r12, lr, r3, r2
+; CHECK-NEXT:    mla r3, r3, r4, lr
+; CHECK-NEXT:    vmov r4, s1
+; CHECK-NEXT:    vmov.32 q3[0], r12
+; CHECK-NEXT:    mla r2, r4, r2, r3
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    vmov.32 q3[1], r2
+; CHECK-NEXT:    vmov r2, s6
+; CHECK-NEXT:    umull r4, r12, r3, r2
+; CHECK-NEXT:    vmov.32 q3[2], r4
+; CHECK-NEXT:    vmov r4, s7
+; CHECK-NEXT:    mla r3, r3, r4, r12
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    mla r2, r4, r2, r3
+; CHECK-NEXT:    vmov r3, s8
+; CHECK-NEXT:    vmov.32 q3[3], r2
+; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    vmov r3, s10
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[0], r2
+; CHECK-NEXT:    vmov.32 q0[1], r2
+; CHECK-NEXT:    vmov r2, s11
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    tst.w r2, #1
+; CHECK-NEXT:    csetm r2, ne
+; CHECK-NEXT:    vmov.32 q0[2], r2
+; CHECK-NEXT:    vmov.32 q0[3], r2
+; CHECK-NEXT:    vand q0, q3, q0
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    adds r2, r2, r4
+; CHECK-NEXT:    adc.w r3, r3, r12
+; CHECK-NEXT:    adds r0, r0, r2
+; CHECK-NEXT:    adcs r1, r3
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %c = icmp eq <2 x i64> %b, zeroinitializer
+  %m = mul <2 x i64> %x, %y
+  %s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
+  %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+  %r = add i64 %z, %a
+  ret i64 %r
+}
+
+declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
+declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)