[llvm] c755157 - [ARM] Add some MVE vecreduce tests. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 9 04:09:28 PDT 2020


Author: David Green
Date: 2020-06-09T12:07:19+01:00
New Revision: c755157de940b889f73ae00a632b734d4d6fc06d

URL: https://github.com/llvm/llvm-project/commit/c755157de940b889f73ae00a632b734d4d6fc06d
DIFF: https://github.com/llvm/llvm-project/commit/c755157de940b889f73ae00a632b734d4d6fc06d.diff

LOG: [ARM] Add some MVE vecreduce tests. NFC
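
(For context, a minimal sketch of the pattern these tests exercise, taken from the and_v4i32 case in the diff below: each test calls one of the experimental vector-reduce intrinsics on a vector argument and the assertions check the MVE lowering produced by llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.)

    define arm_aapcs_vfpcc i32 @and_v4i32(<4 x i32> %x) {
    entry:
      ; reduce the four i32 lanes of %x with a bitwise AND
      %z = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %x)
      ret i32 %z
    }
    declare i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32>)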

Added: 
    llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
    llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll
new file mode 100644
index 000000000000..650db38d0089
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll
@@ -0,0 +1,1650 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc i32 @and_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: and_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @and_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: and_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @and_v8i32(<8 x i32> %x) {
+; CHECK-LABEL: and_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i16 @and_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: and_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @and_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: and_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @and_v16i16(<16 x i16> %x) {
+; CHECK-LABEL: and_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i8 @and_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: and_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @and_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: and_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @and_v32i8(<32 x i8> %x) {
+; CHECK-LABEL: and_v32i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i64 @and_v1i64(<1 x i64> %x) {
+; CHECK-LABEL: and_v1i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.and.v1i64(<1 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @and_v2i64(<2 x i64> %x) {
+; CHECK-LABEL: and_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @and_v4i64(<4 x i64> %x) {
+; CHECK-LABEL: and_v4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @and_v2i32_acc(<2 x i32> %x, i32 %y) {
+; CHECK-LABEL: and_v2i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32> %x)
+  %r = and i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @and_v4i32_acc(<4 x i32> %x, i32 %y) {
+; CHECK-LABEL: and_v4i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %x)
+  %r = and i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @and_v8i32_acc(<8 x i32> %x, i32 %y) {
+; CHECK-LABEL: and_v8i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32> %x)
+  %r = and i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i16 @and_v4i16_acc(<4 x i16> %x, i16 %y) {
+; CHECK-LABEL: and_v4i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16> %x)
+  %r = and i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @and_v8i16_acc(<8 x i16> %x, i16 %y) {
+; CHECK-LABEL: and_v8i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16> %x)
+  %r = and i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @and_v16i16_acc(<16 x i16> %x, i16 %y) {
+; CHECK-LABEL: and_v16i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16> %x)
+  %r = and i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i8 @and_v8i8_acc(<8 x i8> %x, i8 %y) {
+; CHECK-LABEL: and_v8i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8> %x)
+  %r = and i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @and_v16i8_acc(<16 x i8> %x, i8 %y) {
+; CHECK-LABEL: and_v16i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8> %x)
+  %r = and i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @and_v32i8_acc(<32 x i8> %x, i8 %y) {
+; CHECK-LABEL: and_v32i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8> %x)
+  %r = and i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i64 @and_v1i64_acc(<1 x i64> %x, i64 %y) {
+; CHECK-LABEL: and_v1i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    ands r1, r3
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.and.v1i64(<1 x i64> %x)
+  %r = and i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @and_v2i64_acc(<2 x i64> %x, i64 %y) {
+; CHECK-LABEL: and_v2i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    ands r2, r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ands r2, r3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64> %x)
+  %r = and i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @and_v4i64_acc(<4 x i64> %x, i64 %y) {
+; CHECK-LABEL: and_v4i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vand q0, q0, q1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    ands r2, r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    ands r0, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    ands r2, r3
+; CHECK-NEXT:    ands r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64> %x)
+  %r = and i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i32 @or_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: or_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @or_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: or_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @or_v8i32(<8 x i32> %x) {
+; CHECK-LABEL: or_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i16 @or_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: or_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @or_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: or_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @or_v16i16(<16 x i16> %x) {
+; CHECK-LABEL: or_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i8 @or_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: or_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @or_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: or_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @or_v32i8(<32 x i8> %x) {
+; CHECK-LABEL: or_v32i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i64 @or_v1i64(<1 x i64> %x) {
+; CHECK-LABEL: or_v1i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.or.v1i64(<1 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @or_v2i64(<2 x i64> %x) {
+; CHECK-LABEL: or_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @or_v4i64(<4 x i64> %x) {
+; CHECK-LABEL: or_v4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @or_v2i32_acc(<2 x i32> %x, i32 %y) {
+; CHECK-LABEL: or_v2i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32> %x)
+  %r = or i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @or_v4i32_acc(<4 x i32> %x, i32 %y) {
+; CHECK-LABEL: or_v4i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> %x)
+  %r = or i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @or_v8i32_acc(<8 x i32> %x, i32 %y) {
+; CHECK-LABEL: or_v8i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32> %x)
+  %r = or i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i16 @or_v4i16_acc(<4 x i16> %x, i16 %y) {
+; CHECK-LABEL: or_v4i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16> %x)
+  %r = or i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @or_v8i16_acc(<8 x i16> %x, i16 %y) {
+; CHECK-LABEL: or_v8i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16> %x)
+  %r = or i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @or_v16i16_acc(<16 x i16> %x, i16 %y) {
+; CHECK-LABEL: or_v16i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16> %x)
+  %r = or i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i8 @or_v8i8_acc(<8 x i8> %x, i8 %y) {
+; CHECK-LABEL: or_v8i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8> %x)
+  %r = or i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @or_v16i8_acc(<16 x i8> %x, i8 %y) {
+; CHECK-LABEL: or_v16i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8> %x)
+  %r = or i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @or_v32i8_acc(<32 x i8> %x, i8 %y) {
+; CHECK-LABEL: or_v32i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8> %x)
+  %r = or i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i64 @or_v1i64_acc(<1 x i64> %x, i64 %y) {
+; CHECK-LABEL: or_v1i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    orrs r0, r2
+; CHECK-NEXT:    orrs r1, r3
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.or.v1i64(<1 x i64> %x)
+  %r = or i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @or_v2i64_acc(<2 x i64> %x, i64 %y) {
+; CHECK-LABEL: or_v2i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    orrs r0, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64> %x)
+  %r = or i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @or_v4i64_acc(<4 x i64> %x, i64 %y) {
+; CHECK-LABEL: or_v4i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vorr q0, q0, q1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    orrs r0, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    orrs r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64> %x)
+  %r = or i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i32 @xor_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: xor_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @xor_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: xor_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @xor_v8i32(<8 x i32> %x) {
+; CHECK-LABEL: xor_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i16 @xor_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: xor_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @xor_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: xor_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @xor_v16i16(<16 x i16> %x) {
+; CHECK-LABEL: xor_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i8 @xor_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: xor_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @xor_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: xor_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @xor_v32i8(<32 x i8> %x) {
+; CHECK-LABEL: xor_v32i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i64 @xor_v1i64(<1 x i64> %x) {
+; CHECK-LABEL: xor_v1i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @xor_v2i64(<2 x i64> %x) {
+; CHECK-LABEL: xor_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @xor_v4i64(<4 x i64> %x) {
+; CHECK-LABEL: xor_v4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @xor_v2i32_acc(<2 x i32> %x, i32 %y) {
+; CHECK-LABEL: xor_v2i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32> %x)
+  %r = xor i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @xor_v4i32_acc(<4 x i32> %x, i32 %y) {
+; CHECK-LABEL: xor_v4i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %x)
+  %r = xor i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @xor_v8i32_acc(<8 x i32> %x, i32 %y) {
+; CHECK-LABEL: xor_v8i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32> %x)
+  %r = xor i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i16 @xor_v4i16_acc(<4 x i16> %x, i16 %y) {
+; CHECK-LABEL: xor_v4i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16> %x)
+  %r = xor i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @xor_v8i16_acc(<8 x i16> %x, i16 %y) {
+; CHECK-LABEL: xor_v8i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16> %x)
+  %r = xor i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @xor_v16i16_acc(<16 x i16> %x, i16 %y) {
+; CHECK-LABEL: xor_v16i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16> %x)
+  %r = xor i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i8 @xor_v8i8_acc(<8 x i8> %x, i8 %y) {
+; CHECK-LABEL: xor_v8i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8> %x)
+  %r = xor i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @xor_v16i8_acc(<16 x i8> %x, i8 %y) {
+; CHECK-LABEL: xor_v16i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8> %x)
+  %r = xor i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @xor_v32i8_acc(<32 x i8> %x, i8 %y) {
+; CHECK-LABEL: xor_v32i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    eors r0, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8> %x)
+  %r = xor i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i64 @xor_v1i64_acc(<1 x i64> %x, i64 %y) {
+; CHECK-LABEL: xor_v1i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    eors r0, r2
+; CHECK-NEXT:    eors r1, r3
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64> %x)
+  %r = xor i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @xor_v2i64_acc(<2 x i64> %x, i64 %y) {
+; CHECK-LABEL: xor_v2i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    eors r2, r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    eors r0, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    eors r2, r3
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64> %x)
+  %r = xor i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @xor_v4i64_acc(<4 x i64> %x, i64 %y) {
+; CHECK-LABEL: xor_v4i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    veor q0, q0, q1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    eors r2, r3
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    eors r0, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    eors r2, r3
+; CHECK-NEXT:    eors r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64> %x)
+  %r = xor i64 %y, %z
+  ret i64 %r
+}
+
+declare i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16>)
+declare i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32>)
+declare i64 @llvm.experimental.vector.reduce.and.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.or.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64>)
+declare i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8>)

diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
new file mode 100644
index 000000000000..c817f73ff817
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
@@ -0,0 +1,1723 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+
+define i32 @add_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: add_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB0_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB0_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    b .LBB0_7
+; CHECK-NEXT:  .LBB0_3:
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    b .LBB0_9
+; CHECK-NEXT:  .LBB0_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    subs r0, r3, #4
+; CHECK-NEXT:    add.w lr, r2, r0, lsr #2
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    mov r2, r12
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB0_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r2], #16
+; CHECK-NEXT:    vaddva.u32 r0, q0
+; CHECK-NEXT:    le lr, .LBB0_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r7, pc}
+; CHECK-NEXT:  .LBB0_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r1, r12, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB0_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r2, [r1], #4
+; CHECK-NEXT:    add r0, r2
+; CHECK-NEXT:    le lr, .LBB0_8
+; CHECK-NEXT:  .LBB0_9: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %wide.load)
+  %3 = add i32 %2, %vec.phi
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 0, %for.body.preheader ], [ %3, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %5 = load i32, i32* %arrayidx, align 4
+  %add = add nsw i32 %5, %r.07
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 0, %entry ], [ %3, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @mul_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: mul_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB1_8
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB1_3
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    mov.w r12, #0
+; CHECK-NEXT:    b .LBB1_6
+; CHECK-NEXT:  .LBB1_3: @ %vector.ph
+; CHECK-NEXT:    bic r12, r1, #3
+; CHECK-NEXT:    vmov.i32 q0, #0x1
+; CHECK-NEXT:    sub.w r3, r12, #4
+; CHECK-NEXT:    add.w lr, r2, r3, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB1_4: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    vmul.i32 q0, q1, q0
+; CHECK-NEXT:    le lr, .LBB1_4
+; CHECK-NEXT:  @ %bb.5: @ %middle.block
+; CHECK-NEXT:    vmov r2, s1
+; CHECK-NEXT:    cmp r12, r1
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    mul r2, r3, r2
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    mul r2, r3, r2
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    mul r2, r3, r2
+; CHECK-NEXT:    beq .LBB1_8
+; CHECK-NEXT:  .LBB1_6: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r12
+; CHECK-NEXT:    add.w r0, r0, r12, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB1_7: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    muls r2, r1, r2
+; CHECK-NEXT:    le lr, .LBB1_7
+; CHECK-NEXT:  .LBB1_8: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ <i32 1, i32 1, i32 1, i32 1>, %vector.ph ], [ %2, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = mul <4 x i32> %wide.load, %vec.phi
+  %index.next = add i32 %index, 4
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %4 = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %2)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 1, %for.body.preheader ], [ %4, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %5 = load i32, i32* %arrayidx, align 4
+  %add = mul nsw i32 %5, %r.07
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 1, %entry ], [ %4, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @and_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: and_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB2_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB2_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    b .LBB2_7
+; CHECK-NEXT:  .LBB2_3:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    b .LBB2_9
+; CHECK-NEXT:  .LBB2_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    add.w lr, r2, r12, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB2_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    vand q0, q1, q0
+; CHECK-NEXT:    le lr, .LBB2_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    vmov r12, s1
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    and.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    and.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    and.w r2, r2, r12
+; CHECK-NEXT:    beq .LBB2_9
+; CHECK-NEXT:  .LBB2_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r0, r0, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB2_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    ands r2, r1
+; CHECK-NEXT:    le lr, .LBB2_8
+; CHECK-NEXT:  .LBB2_9: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ <i32 -1, i32 -1, i32 -1, i32 -1>, %vector.ph ], [ %2, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = and <4 x i32> %wide.load, %vec.phi
+  %index.next = add i32 %index, 4
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %4 = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %2)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ -1, %for.body.preheader ], [ %4, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %5 = load i32, i32* %arrayidx, align 4
+  %add = and i32 %5, %r.07
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ -1, %entry ], [ %4, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @or_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: or_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB3_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB3_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB3_7
+; CHECK-NEXT:  .LBB3_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB3_9
+; CHECK-NEXT:  .LBB3_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    add.w lr, r2, r12, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB3_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    vorr q0, q1, q0
+; CHECK-NEXT:    le lr, .LBB3_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    vmov r12, s1
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    orr.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    orr.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    orr.w r2, r2, r12
+; CHECK-NEXT:    beq .LBB3_9
+; CHECK-NEXT:  .LBB3_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r0, r0, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB3_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    orrs r2, r1
+; CHECK-NEXT:    le lr, .LBB3_8
+; CHECK-NEXT:  .LBB3_9: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %2, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = or <4 x i32> %wide.load, %vec.phi
+  %index.next = add i32 %index, 4
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %4 = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> %2)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 0, %for.body.preheader ], [ %4, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %5 = load i32, i32* %arrayidx, align 4
+  %add = or i32 %5, %r.07
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 0, %entry ], [ %4, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @xor_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: xor_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB4_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB4_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB4_7
+; CHECK-NEXT:  .LBB4_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB4_9
+; CHECK-NEXT:  .LBB4_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    add.w lr, r2, r12, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB4_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    veor q0, q1, q0
+; CHECK-NEXT:    le lr, .LBB4_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    vmov r12, s1
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    eor.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    eor.w r12, r12, r2
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    eor.w r2, r2, r12
+; CHECK-NEXT:    beq .LBB4_9
+; CHECK-NEXT:  .LBB4_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r0, r0, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB4_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    eors r2, r1
+; CHECK-NEXT:    le lr, .LBB4_8
+; CHECK-NEXT:  .LBB4_9: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %2, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = xor <4 x i32> %wide.load, %vec.phi
+  %index.next = add i32 %index, 4
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %4 = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %2)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 0, %for.body.preheader ], [ %4, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %5 = load i32, i32* %arrayidx, align 4
+  %add = xor i32 %5, %r.07
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 0, %entry ], [ %4, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define float @fadd_f32(float* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: fadd_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB5_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB5_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    vldr s0, .LCPI5_0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB5_7
+; CHECK-NEXT:  .LBB5_3:
+; CHECK-NEXT:    vldr s0, .LCPI5_0
+; CHECK-NEXT:    b .LBB5_9
+; CHECK-NEXT:  .LBB5_4: @ %vector.ph
+; CHECK-NEXT:    bic r2, r1, #3
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    sub.w r12, r2, #4
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB5_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r3], #16
+; CHECK-NEXT:    vadd.f32 q0, q1, q0
+; CHECK-NEXT:    le lr, .LBB5_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    vadd.f32 s4, s0, s1
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    vadd.f32 s4, s4, s2
+; CHECK-NEXT:    vadd.f32 s0, s4, s3
+; CHECK-NEXT:    beq .LBB5_9
+; CHECK-NEXT:  .LBB5_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r2
+; CHECK-NEXT:    add.w r0, r0, r2, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB5_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldr s2, [r0]
+; CHECK-NEXT:    adds r0, #4
+; CHECK-NEXT:    vadd.f32 s0, s2, s0
+; CHECK-NEXT:    le lr, .LBB5_8
+; CHECK-NEXT:  .LBB5_9: @ %for.cond.cleanup
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.10:
+; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %2, %vector.body ]
+  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fadd fast <4 x float> %wide.load, %vec.phi
+  %index.next = add i32 %index, 4
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %4 = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %2)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi float [ 0.000000e+00, %for.body.preheader ], [ %4, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi float [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds float, float* %x, i32 %i.08
+  %5 = load float, float* %arrayidx, align 4
+  %add = fadd fast float %5, %r.07
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi float [ 0.000000e+00, %entry ], [ %4, %middle.block ], [ %add, %for.body ]
+  ret float %r.0.lcssa
+}
+
+define float @fmul_f32(float* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: fmul_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB6_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB6_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    vmov.f32 s0, #1.000000e+00
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB6_7
+; CHECK-NEXT:  .LBB6_3:
+; CHECK-NEXT:    vmov.f32 s0, #1.000000e+00
+; CHECK-NEXT:    b .LBB6_9
+; CHECK-NEXT:  .LBB6_4: @ %vector.ph
+; CHECK-NEXT:    bic r2, r1, #3
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    sub.w r12, r2, #4
+; CHECK-NEXT:    vmov.f32 q0, #1.000000e+00
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB6_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r3], #16
+; CHECK-NEXT:    vmul.f32 q0, q1, q0
+; CHECK-NEXT:    le lr, .LBB6_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    vmul.f32 s4, s0, s1
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    vmul.f32 s4, s4, s2
+; CHECK-NEXT:    vmul.f32 s0, s4, s3
+; CHECK-NEXT:    beq .LBB6_9
+; CHECK-NEXT:  .LBB6_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r2
+; CHECK-NEXT:    add.w r0, r0, r2, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB6_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldr s2, [r0]
+; CHECK-NEXT:    adds r0, #4
+; CHECK-NEXT:    vmul.f32 s0, s2, s0
+; CHECK-NEXT:    le lr, .LBB6_8
+; CHECK-NEXT:  .LBB6_9: @ %for.cond.cleanup
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x float> [ <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %vector.ph ], [ %2, %vector.body ]
+  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fmul fast <4 x float> %wide.load, %vec.phi
+  %index.next = add i32 %index, 4
+  %3 = icmp eq i32 %index.next, %n.vec
+  br i1 %3, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %4 = call fast float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float 1.000000e+00, <4 x float> %2)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi float [ 1.000000e+00, %for.body.preheader ], [ %4, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi float [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds float, float* %x, i32 %i.08
+  %5 = load float, float* %arrayidx, align 4
+  %add = fmul fast float %5, %r.07
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi float [ 1.000000e+00, %entry ], [ %4, %middle.block ], [ %add, %for.body ]
+  ret float %r.0.lcssa
+}
+
+define i32 @smin_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: smin_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB7_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB7_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    mvn r2, #-2147483648
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    b .LBB7_7
+; CHECK-NEXT:  .LBB7_3:
+; CHECK-NEXT:    mvn r2, #-2147483648
+; CHECK-NEXT:    b .LBB7_9
+; CHECK-NEXT:  .LBB7_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    vmvn.i32 q0, #0x80000000
+; CHECK-NEXT:    add.w lr, r2, r12, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB7_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    vmin.s32 q0, q0, q1
+; CHECK-NEXT:    le lr, .LBB7_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    mvn r2, #-2147483648
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    vminv.s32 r2, q0
+; CHECK-NEXT:    beq .LBB7_9
+; CHECK-NEXT:  .LBB7_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r0, r0, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB7_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    it ge
+; CHECK-NEXT:    movge r2, r1
+; CHECK-NEXT:    le lr, .LBB7_8
+; CHECK-NEXT:  .LBB7_9: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = icmp slt <4 x i32> %vec.phi, %wide.load
+  %3 = select <4 x i1> %2, <4 x i32> %vec.phi, <4 x i32> %wide.load
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %3)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 2147483647, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp slt i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 2147483647, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @smin_i32_inloop(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: smin_i32_inloop:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB8_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    mvn r0, #-2147483648
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB8_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    b .LBB8_7
+; CHECK-NEXT:  .LBB8_3:
+; CHECK-NEXT:    mvn r0, #-2147483648
+; CHECK-NEXT:    b .LBB8_9
+; CHECK-NEXT:  .LBB8_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w lr, r3, #4
+; CHECK-NEXT:    add.w lr, r2, lr, lsr #2
+; CHECK-NEXT:    mov r2, r12
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB8_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r2], #16
+; CHECK-NEXT:    mvn r4, #-2147483648
+; CHECK-NEXT:    vminv.s32 r4, q0
+; CHECK-NEXT:    cmp r0, r4
+; CHECK-NEXT:    it ge
+; CHECK-NEXT:    movge r0, r4
+; CHECK-NEXT:    le lr, .LBB8_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r4, pc}
+; CHECK-NEXT:  .LBB8_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r1, r12, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB8_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r2, [r1], #4
+; CHECK-NEXT:    cmp r0, r2
+; CHECK-NEXT:    it ge
+; CHECK-NEXT:    movge r0, r2
+; CHECK-NEXT:    le lr, .LBB8_8
+; CHECK-NEXT:  .LBB8_9: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 2147483647, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %l5 = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %wide.load)
+  %2 = icmp slt i32 %vec.phi, %l5
+  %3 = select i1 %2, i32 %vec.phi, i32 %l5
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = phi i32 [ %3, %vector.body ]
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 2147483647, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp slt i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 2147483647, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @smax_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: smax_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB9_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB9_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    mov.w r2, #-2147483648
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    b .LBB9_7
+; CHECK-NEXT:  .LBB9_3:
+; CHECK-NEXT:    mov.w r2, #-2147483648
+; CHECK-NEXT:    b .LBB9_9
+; CHECK-NEXT:  .LBB9_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    vmov.i32 q0, #0x80000000
+; CHECK-NEXT:    add.w lr, r2, r12, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB9_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    vmax.s32 q0, q0, q1
+; CHECK-NEXT:    le lr, .LBB9_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    mov.w r2, #-2147483648
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    vmaxv.s32 r2, q0
+; CHECK-NEXT:    beq .LBB9_9
+; CHECK-NEXT:  .LBB9_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r0, r0, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB9_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    it le
+; CHECK-NEXT:    movle r2, r1
+; CHECK-NEXT:    le lr, .LBB9_8
+; CHECK-NEXT:  .LBB9_9: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = icmp sgt <4 x i32> %vec.phi, %wide.load
+  %3 = select <4 x i1> %2, <4 x i32> %vec.phi, <4 x i32> %wide.load
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %3)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ -2147483648, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp sgt i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ -2147483648, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @smax_i32_inloop(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: smax_i32_inloop:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB10_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    mov.w r0, #-2147483648
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB10_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    b .LBB10_7
+; CHECK-NEXT:  .LBB10_3:
+; CHECK-NEXT:    mov.w r0, #-2147483648
+; CHECK-NEXT:    b .LBB10_9
+; CHECK-NEXT:  .LBB10_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w lr, r3, #4
+; CHECK-NEXT:    add.w lr, r2, lr, lsr #2
+; CHECK-NEXT:    mov r2, r12
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB10_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r2], #16
+; CHECK-NEXT:    mov.w r4, #-2147483648
+; CHECK-NEXT:    vmaxv.s32 r4, q0
+; CHECK-NEXT:    cmp r0, r4
+; CHECK-NEXT:    it le
+; CHECK-NEXT:    movle r0, r4
+; CHECK-NEXT:    le lr, .LBB10_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r4, pc}
+; CHECK-NEXT:  .LBB10_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r1, r12, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB10_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r2, [r1], #4
+; CHECK-NEXT:    cmp r0, r2
+; CHECK-NEXT:    it le
+; CHECK-NEXT:    movle r0, r2
+; CHECK-NEXT:    le lr, .LBB10_8
+; CHECK-NEXT:  .LBB10_9: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ -2147483648, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %l5 = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %wide.load)
+  %2 = icmp sgt i32 %vec.phi, %l5
+  %3 = select i1 %2, i32 %vec.phi, i32 %l5
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = phi i32 [ %3, %vector.body ]
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ -2147483648, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp sgt i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ -2147483648, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @umin_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: umin_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB11_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB11_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    b .LBB11_7
+; CHECK-NEXT:  .LBB11_3:
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    b .LBB11_9
+; CHECK-NEXT:  .LBB11_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    add.w lr, r2, r12, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB11_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    vmin.u32 q0, q0, q1
+; CHECK-NEXT:    le lr, .LBB11_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    mov.w r2, #-1
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    vminv.u32 r2, q0
+; CHECK-NEXT:    beq .LBB11_9
+; CHECK-NEXT:  .LBB11_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r0, r0, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB11_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    it hs
+; CHECK-NEXT:    movhs r2, r1
+; CHECK-NEXT:    le lr, .LBB11_8
+; CHECK-NEXT:  .LBB11_9: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ <i32 -1, i32 -1, i32 -1, i32 -1>, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = icmp ult <4 x i32> %vec.phi, %wide.load
+  %3 = select <4 x i1> %2, <4 x i32> %vec.phi, <4 x i32> %wide.load
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %3)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ -1, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp ult i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ -1, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @umin_i32_inloop(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: umin_i32_inloop:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB12_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    mov.w r0, #-1
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB12_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    b .LBB12_7
+; CHECK-NEXT:  .LBB12_3:
+; CHECK-NEXT:    mov.w r0, #-1
+; CHECK-NEXT:    b .LBB12_9
+; CHECK-NEXT:  .LBB12_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w lr, r3, #4
+; CHECK-NEXT:    add.w lr, r2, lr, lsr #2
+; CHECK-NEXT:    mov r2, r12
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB12_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r2], #16
+; CHECK-NEXT:    mov.w r4, #-1
+; CHECK-NEXT:    vminv.u32 r4, q0
+; CHECK-NEXT:    cmp r0, r4
+; CHECK-NEXT:    it hs
+; CHECK-NEXT:    movhs r0, r4
+; CHECK-NEXT:    le lr, .LBB12_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r4, pc}
+; CHECK-NEXT:  .LBB12_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r1, r12, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB12_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r2, [r1], #4
+; CHECK-NEXT:    cmp r0, r2
+; CHECK-NEXT:    it ls
+; CHECK-NEXT:    movls r0, r2
+; CHECK-NEXT:    le lr, .LBB12_8
+; CHECK-NEXT:  .LBB12_9: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ -1, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %l5 = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %wide.load)
+  %2 = icmp ult i32 %vec.phi, %l5
+  %3 = select i1 %2, i32 %vec.phi, i32 %l5
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = phi i32 [ %3, %vector.body ]
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ -1, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp ugt i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ -1, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @umax_i32(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: umax_i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB13_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB13_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB13_7
+; CHECK-NEXT:  .LBB13_3:
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB13_9
+; CHECK-NEXT:  .LBB13_4: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    sub.w r12, r3, #4
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    add.w lr, r2, r12, lsr #2
+; CHECK-NEXT:    mov r2, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB13_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r2], #16
+; CHECK-NEXT:    vmax.u32 q0, q0, q1
+; CHECK-NEXT:    le lr, .LBB13_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    vmaxv.u32 r2, q0
+; CHECK-NEXT:    beq .LBB13_9
+; CHECK-NEXT:  .LBB13_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r0, r0, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB13_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r1, [r0], #4
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    it ls
+; CHECK-NEXT:    movls r2, r1
+; CHECK-NEXT:    le lr, .LBB13_8
+; CHECK-NEXT:  .LBB13_9: @ %for.cond.cleanup
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = icmp ugt <4 x i32> %vec.phi, %wide.load
+  %3 = select <4 x i1> %2, <4 x i32> %vec.phi, <4 x i32> %wide.load
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %3)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 0, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp ugt i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 0, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define i32 @umax_i32_inloop(i32* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: umax_i32_inloop:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB14_8
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    mov r12, r0
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    mov.w r0, #0
+; CHECK-NEXT:    blo .LBB14_5
+; CHECK-NEXT:  @ %bb.2: @ %vector.ph
+; CHECK-NEXT:    bic r3, r1, #3
+; CHECK-NEXT:    movs r2, #1
+; CHECK-NEXT:    subs r0, r3, #4
+; CHECK-NEXT:    add.w lr, r2, r0, lsr #2
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    mov r2, r12
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB14_3: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q0, [r2], #16
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmaxv.u32 r4, q0
+; CHECK-NEXT:    cmp r0, r4
+; CHECK-NEXT:    it ls
+; CHECK-NEXT:    movls r0, r4
+; CHECK-NEXT:    le lr, .LBB14_3
+; CHECK-NEXT:  @ %bb.4: @ %middle.block
+; CHECK-NEXT:    cmp r3, r1
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r4, pc}
+; CHECK-NEXT:  .LBB14_5: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r3
+; CHECK-NEXT:    add.w r1, r12, r3, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB14_6: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr r2, [r1], #4
+; CHECK-NEXT:    cmp r0, r2
+; CHECK-NEXT:    it ls
+; CHECK-NEXT:    movls r0, r2
+; CHECK-NEXT:    le lr, .LBB14_6
+; CHECK-NEXT:  @ %bb.7: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r4, pc}
+; CHECK-NEXT:  .LBB14_8:
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi i32 [ 0, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %x, i32 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %l5 = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %wide.load)
+  %2 = icmp ugt i32 %vec.phi, %l5
+  %3 = select i1 %2, i32 %vec.phi, i32 %l5
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = phi i32 [ %3, %vector.body ]
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi i32 [ 0, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi i32 [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds i32, i32* %x, i32 %i.08
+  %6 = load i32, i32* %arrayidx, align 4
+  %c = icmp ugt i32 %r.07, %6
+  %add = select i1 %c, i32 %r.07, i32 %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi i32 [ 0, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret i32 %r.0.lcssa
+}
+
+define float @fmin_f32(float* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: fmin_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB15_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB15_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    vldr s0, .LCPI15_0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB15_7
+; CHECK-NEXT:  .LBB15_3:
+; CHECK-NEXT:    vldr s0, .LCPI15_0
+; CHECK-NEXT:    b .LBB15_9
+; CHECK-NEXT:  .LBB15_4: @ %vector.ph
+; CHECK-NEXT:    bic r2, r1, #3
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    sub.w r12, r2, #4
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB15_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r3], #16
+; CHECK-NEXT:    vcmp.f32 lt, q0, q1
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    le lr, .LBB15_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    vmov.f32 s5, s3
+; CHECK-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vdup.32 q1, r3
+; CHECK-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-NEXT:    beq .LBB15_9
+; CHECK-NEXT:  .LBB15_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r2
+; CHECK-NEXT:    add.w r0, r0, r2, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB15_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldmia r0!, {s4}
+; CHECK-NEXT:    vcmp.f32 s0, s4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselge.f32 s0, s4, s0
+; CHECK-NEXT:    le lr, .LBB15_8
+; CHECK-NEXT:  .LBB15_9: @ %for.cond.cleanup
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.10:
+; CHECK-NEXT:  .LCPI15_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fcmp ult <4 x float> %vec.phi, %wide.load
+  %3 = select <4 x i1> %2, <4 x float> %vec.phi, <4 x float> %wide.load
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %3)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi float [ 0.0, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi float [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds float, float* %x, i32 %i.08
+  %6 = load float, float* %arrayidx, align 4
+  %c = fcmp ult float %r.07, %6
+  %add = select i1 %c, float %r.07, float %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi float [ 0.0, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret float %r.0.lcssa
+}
+
+define float @fmax_f32(float* nocapture readonly %x, i32 %n) {
+; CHECK-LABEL: fmax_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r1, #1
+; CHECK-NEXT:    blt .LBB16_3
+; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
+; CHECK-NEXT:    cmp r1, #4
+; CHECK-NEXT:    bhs .LBB16_4
+; CHECK-NEXT:  @ %bb.2:
+; CHECK-NEXT:    vldr s0, .LCPI16_0
+; CHECK-NEXT:    movs r2, #0
+; CHECK-NEXT:    b .LBB16_7
+; CHECK-NEXT:  .LBB16_3:
+; CHECK-NEXT:    vldr s0, .LCPI16_0
+; CHECK-NEXT:    b .LBB16_9
+; CHECK-NEXT:  .LBB16_4: @ %vector.ph
+; CHECK-NEXT:    bic r2, r1, #3
+; CHECK-NEXT:    movs r3, #1
+; CHECK-NEXT:    sub.w r12, r2, #4
+; CHECK-NEXT:    vmov.i32 q0, #0x0
+; CHECK-NEXT:    add.w lr, r3, r12, lsr #2
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB16_5: @ %vector.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldrw.u32 q1, [r3], #16
+; CHECK-NEXT:    vcmp.f32 lt, q1, q0
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    le lr, .LBB16_5
+; CHECK-NEXT:  @ %bb.6: @ %middle.block
+; CHECK-NEXT:    vmov.f32 s4, s2
+; CHECK-NEXT:    cmp r2, r1
+; CHECK-NEXT:    vmov.f32 s5, s3
+; CHECK-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    vdup.32 q1, r3
+; CHECK-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-NEXT:    beq .LBB16_9
+; CHECK-NEXT:  .LBB16_7: @ %for.body.preheader1
+; CHECK-NEXT:    sub.w lr, r1, r2
+; CHECK-NEXT:    add.w r0, r0, r2, lsl #2
+; CHECK-NEXT:    dls lr, lr
+; CHECK-NEXT:  .LBB16_8: @ %for.body
+; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vldmia r0!, {s4}
+; CHECK-NEXT:    vcmp.f32 s4, s0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselge.f32 s0, s4, s0
+; CHECK-NEXT:    le lr, .LBB16_8
+; CHECK-NEXT:  .LBB16_9: @ %for.cond.cleanup
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    pop {r7, pc}
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.10:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .long 0x00000000 @ float 0
+entry:
+  %cmp6 = icmp sgt i32 %n, 0
+  br i1 %cmp6, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %min.iters.check = icmp ult i32 %n, 4
+  br i1 %min.iters.check, label %for.body.preheader1, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body.preheader
+  %n.vec = and i32 %n, -4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x float> [ zeroinitializer, %vector.ph ], [ %3, %vector.body ]
+  %0 = getelementptr inbounds float, float* %x, i32 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fcmp ugt <4 x float> %vec.phi, %wide.load
+  %3 = select <4 x i1> %2, <4 x float> %vec.phi, <4 x float> %wide.load
+  %index.next = add i32 %index, 4
+  %4 = icmp eq i32 %index.next, %n.vec
+  br i1 %4, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %5 = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %3)
+  %cmp.n = icmp eq i32 %n.vec, %n
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
+
+for.body.preheader1:                              ; preds = %middle.block, %for.body.preheader
+  %i.08.ph = phi i32 [ 0, %for.body.preheader ], [ %n.vec, %middle.block ]
+  %r.07.ph = phi float [ 0.0, %for.body.preheader ], [ %5, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader1, %for.body
+  %i.08 = phi i32 [ %inc, %for.body ], [ %i.08.ph, %for.body.preheader1 ]
+  %r.07 = phi float [ %add, %for.body ], [ %r.07.ph, %for.body.preheader1 ]
+  %arrayidx = getelementptr inbounds float, float* %x, i32 %i.08
+  %6 = load float, float* %arrayidx, align 4
+  %c = fcmp ugt float %r.07, %6
+  %add = select i1 %c, float %r.07, float %6
+  %inc = add nuw nsw i32 %i.08, 1
+  %exitcond = icmp eq i32 %inc, %n
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
+  %r.0.lcssa = phi float [ 0.0, %entry ], [ %5, %middle.block ], [ %add, %for.body ]
+  ret float %r.0.lcssa
+}
+
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32>)
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float, <4 x float>)
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float, <4 x float>)
+declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
+declare float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float>)
+declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)
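
The reduction-loop tests above pair a vectorized <4 x i32> / <4 x float> reduction body with a scalar epilogue for the tail iterations. For orientation only, the IR corresponds to source loops of roughly the following shape (an assumed C sketch with illustrative names; not part of the committed tests):

unsigned umax_loop(const unsigned *x, int n) {
  unsigned r = 0;
  for (int i = 0; i < n; i++)
    r = r > x[i] ? r : x[i];   /* unsigned max reduction */
  return r;
}

float fmin_loop(const float *x, int n) {
  float r = 0.0f;
  for (int i = 0; i < n; i++)
    r = r < x[i] ? r : x[i];   /* float min-style reduction; fmax_f32 is the mirror image */
  return r;
}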

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll
new file mode 100644
index 000000000000..15ae58de9357
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll
@@ -0,0 +1,588 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc i32 @mul_v2i32(<2 x i32> %x) {
+; CHECK-LABEL: mul_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s2
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @mul_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: mul_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i32 @mul_v8i32(<8 x i32> %x) {
+; CHECK-LABEL: mul_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32> %x)
+  ret i32 %z
+}
+
+define arm_aapcs_vfpcc i16 @mul_v4i16(<4 x i16> %x) {
+; CHECK-LABEL: mul_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r0, s1
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov r1, s3
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @mul_v8i16(<8 x i16> %x) {
+; CHECK-LABEL: mul_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i16 @mul_v16i16(<16 x i16> %x) {
+; CHECK-LABEL: mul_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i16 q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16> %x)
+  ret i16 %z
+}
+
+define arm_aapcs_vfpcc i8 @mul_v8i8(<8 x i8> %x) {
+; CHECK-LABEL: mul_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r0, q0[1]
+; CHECK-NEXT:    vmov.u16 r1, q0[0]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[2]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[3]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[4]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[5]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[6]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u16 r1, q0[7]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @mul_v16i8(<16 x i8> %x) {
+; CHECK-LABEL: mul_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i8 @mul_v32i8(<32 x i8> %x) {
+; CHECK-LABEL: mul_v32i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i8 q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r0, q0[1]
+; CHECK-NEXT:    vmov.u8 r1, q0[0]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[2]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[3]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[4]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[5]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[6]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[7]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[8]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[9]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[10]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[11]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[12]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[13]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[14]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    vmov.u8 r1, q0[15]
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8> %x)
+  ret i8 %z
+}
+
+define arm_aapcs_vfpcc i64 @mul_v1i64(<1 x i64> %x) {
+; CHECK-LABEL: mul_v1i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.mul.v1i64(<1 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @mul_v2i64(<2 x i64> %x) {
+; CHECK-LABEL: mul_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r3, s3
+; CHECK-NEXT:    umull r0, r12, r2, r1
+; CHECK-NEXT:    mla r2, r2, r3, r12
+; CHECK-NEXT:    vmov r3, s1
+; CHECK-NEXT:    mla r1, r3, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i64 @mul_v4i64(<4 x i64> %x) {
+; CHECK-LABEL: mul_v4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    vmov lr, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r1, s4
+; CHECK-NEXT:    vmov r6, s6
+; CHECK-NEXT:    vmov r5, s7
+; CHECK-NEXT:    umull r3, r12, r2, lr
+; CHECK-NEXT:    umull r4, r8, r3, r1
+; CHECK-NEXT:    umull r0, r7, r4, r6
+; CHECK-NEXT:    mla r4, r4, r5, r7
+; CHECK-NEXT:    vmov r5, s5
+; CHECK-NEXT:    vmov r7, s1
+; CHECK-NEXT:    mla r3, r3, r5, r8
+; CHECK-NEXT:    vmov r5, s3
+; CHECK-NEXT:    mla r2, r2, r5, r12
+; CHECK-NEXT:    mla r2, r7, lr, r2
+; CHECK-NEXT:    mla r1, r2, r1, r3
+; CHECK-NEXT:    mla r1, r1, r6, r4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64> %x)
+  ret i64 %z
+}
+
+define arm_aapcs_vfpcc i32 @mul_v2i32_acc(<2 x i32> %x, i32 %y) {
+; CHECK-LABEL: mul_v2i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s2
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32> %x)
+  %r = mul i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @mul_v4i32_acc(<4 x i32> %x, i32 %y) {
+; CHECK-LABEL: mul_v4i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %x)
+  %r = mul i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i32 @mul_v8i32_acc(<8 x i32> %x, i32 %y) {
+; CHECK-LABEL: mul_v8i32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i32 q0, q0, q1
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32> %x)
+  %r = mul i32 %y, %z
+  ret i32 %r
+}
+
+define arm_aapcs_vfpcc i16 @mul_v4i16_acc(<4 x i16> %x, i16 %y) {
+; CHECK-LABEL: mul_v4i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov r1, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov r2, s3
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16> %x)
+  %r = mul i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @mul_v8i16_acc(<8 x i16> %x, i16 %y) {
+; CHECK-LABEL: mul_v8i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16> %x)
+  %r = mul i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i16 @mul_v16i16_acc(<16 x i16> %x, i16 %y) {
+; CHECK-LABEL: mul_v16i16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i16 q0, q0, q1
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16> %x)
+  %r = mul i16 %y, %z
+  ret i16 %r
+}
+
+define arm_aapcs_vfpcc i8 @mul_v8i8_acc(<8 x i8> %x, i8 %y) {
+; CHECK-LABEL: mul_v8i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-NEXT:    vmov.u16 r2, q0[0]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[2]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[3]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[4]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[5]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[6]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u16 r2, q0[7]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8> %x)
+  %r = mul i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @mul_v16i8_acc(<16 x i8> %x, i8 %y) {
+; CHECK-LABEL: mul_v16i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8> %x)
+  %r = mul i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i8 @mul_v32i8_acc(<32 x i8> %x, i8 %y) {
+; CHECK-LABEL: mul_v32i8_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmul.i8 q0, q0, q1
+; CHECK-NEXT:    vmov.u8 r1, q0[1]
+; CHECK-NEXT:    vmov.u8 r2, q0[0]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[2]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[3]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[4]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[5]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[6]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[7]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[8]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[9]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[10]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[11]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[12]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[13]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[14]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    vmov.u8 r2, q0[15]
+; CHECK-NEXT:    muls r1, r2, r1
+; CHECK-NEXT:    muls r0, r1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8> %x)
+  %r = mul i8 %y, %z
+  ret i8 %r
+}
+
+define arm_aapcs_vfpcc i64 @mul_v1i64_acc(<1 x i64> %x, i64 %y) {
+; CHECK-LABEL: mul_v1i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    umull r12, lr, r2, r0
+; CHECK-NEXT:    mla r1, r2, r1, lr
+; CHECK-NEXT:    mla r1, r3, r0, r1
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.mul.v1i64(<1 x i64> %x)
+  %r = mul i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @mul_v2i64_acc(<2 x i64> %x, i64 %y) {
+; CHECK-LABEL: mul_v2i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, lr}
+; CHECK-NEXT:    push {r4, lr}
+; CHECK-NEXT:    vmov r2, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r4, s3
+; CHECK-NEXT:    umull r12, lr, r3, r2
+; CHECK-NEXT:    mla r3, r3, r4, lr
+; CHECK-NEXT:    vmov r4, s1
+; CHECK-NEXT:    mla r3, r4, r2, r3
+; CHECK-NEXT:    umull r2, r4, r0, r12
+; CHECK-NEXT:    mla r0, r0, r3, r4
+; CHECK-NEXT:    mla r1, r1, r12, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop {r4, pc}
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %x)
+  %r = mul i64 %y, %z
+  ret i64 %r
+}
+
+define arm_aapcs_vfpcc i64 @mul_v4i64_acc(<4 x i64> %x, i64 %y) {
+; CHECK-LABEL: mul_v4i64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT:    vmov r12, s2
+; CHECK-NEXT:    vmov r3, s0
+; CHECK-NEXT:    vmov r4, s4
+; CHECK-NEXT:    vmov r7, s6
+; CHECK-NEXT:    vmov r6, s7
+; CHECK-NEXT:    umull r2, lr, r3, r12
+; CHECK-NEXT:    umull r5, r8, r2, r4
+; CHECK-NEXT:    umull r10, r9, r5, r7
+; CHECK-NEXT:    mla r5, r5, r6, r9
+; CHECK-NEXT:    vmov r6, s5
+; CHECK-NEXT:    mla r2, r2, r6, r8
+; CHECK-NEXT:    vmov r6, s3
+; CHECK-NEXT:    mla r3, r3, r6, lr
+; CHECK-NEXT:    vmov r6, s1
+; CHECK-NEXT:    mla r3, r6, r12, r3
+; CHECK-NEXT:    mla r2, r3, r4, r2
+; CHECK-NEXT:    mla r3, r2, r7, r5
+; CHECK-NEXT:    umull r2, r7, r0, r10
+; CHECK-NEXT:    mla r0, r0, r3, r7
+; CHECK-NEXT:    mla r1, r1, r10, r0
+; CHECK-NEXT:    mov r0, r2
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
+entry:
+  %z = call i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64> %x)
+  %r = mul i64 %y, %z
+  ret i64 %r
+}
+
+declare i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16>)
+declare i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32>)
+declare i64 @llvm.experimental.vector.reduce.mul.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64>)
+declare i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8>)
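
The i64 cases (mul_v1i64, mul_v2i64, mul_v4i64 above) have no MVE vector multiply for 64-bit elements, so each 64x64->64 product is lowered to scalar umull/mla sequences, as the checks show. A minimal C sketch of that decomposition (illustrative only; not part of the tests):

#include <stdint.h>

/* Low 64 bits of a*b built from 32-bit halves: one umull plus two mla-style adds. */
static uint64_t mul64_from_halves(uint64_t a, uint64_t b) {
  uint32_t aL = (uint32_t)a, aH = (uint32_t)(a >> 32);
  uint32_t bL = (uint32_t)b, bH = (uint32_t)(b >> 32);
  uint64_t lo = (uint64_t)aL * bL;          /* umull: full 32x32->64 product */
  uint32_t hi = (uint32_t)(lo >> 32);
  hi += aL * bH;                            /* mla: cross product, low 32 bits land in the high word */
  hi += aH * bL;                            /* mla: cross product, low 32 bits land in the high word */
  return ((uint64_t)hi << 32) | (uint32_t)lo;
}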


        

