[llvm] 362d00e - [ARM][VecReduce] Force expand vector_reduce_fmin

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 4 01:37:29 PST 2020


Author: David Green
Date: 2020-02-04T09:36:59Z
New Revision: 362d00e0510ee75750499e2993a782428e377215

URL: https://github.com/llvm/llvm-project/commit/362d00e0510ee75750499e2993a782428e377215
DIFF: https://github.com/llvm/llvm-project/commit/362d00e0510ee75750499e2993a782428e377215.diff

LOG: [ARM][VecReduce] Force expand vector_reduce_fmin

Under MVE we do not have any lowering for fminimum, which is what a
vector_reduce_fmin without the NoNan flag will be expanded into. As with
the other recent patches, force this to expand in the pre-isel pass. Note
that Neon lowering would be OK because the scalar fminimum uses the
vector VMIN instruction, but it is probably better to just rely on the
scalar operations, which is what is done here.
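
As a rough illustration of the rule being installed (a sketch only; the
real change is the ARMTargetTransformInfo.h hunk below, and the helper
name here is made up for exposition):

  // Hypothetical restatement of the new predicate: force an fmin/fmax
  // reduction to expand before ISel if we are in soft-float mode, have no
  // FP registers, or the intrinsic lacks the nnan fast-math flag (without
  // nnan, legalization would need fminimum/fmaximum, which MVE cannot
  // select).
  static bool shouldExpandFPMinMaxReduction(bool UseSoftFloat, bool HasFPRegs,
                                            bool HasNoNaNs) {
    return UseSoftFloat || !HasFPRegs || !HasNoNaNs;
  }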

Also fixes what appears to be a reversal of +Inf vs -Inf for the neutral
element in the vector_reduce_fmin/fmax widening code.
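
The neutral-element fix follows from min(x, +Inf) == x and max(x, -Inf) == x:
lanes added by widening must be padded with +Inf for an FMIN reduction and
with -Inf for an FMAX reduction. A small standalone sanity check of that
identity (not part of the patch):

  #include <algorithm>
  #include <cassert>
  #include <limits>

  int main() {
    const float Inf = std::numeric_limits<float>::infinity();
    const float X = 42.0f;
    assert(std::min(X, +Inf) == X); // +Inf is neutral for a min reduction
    assert(std::max(X, -Inf) == X); // -Inf is neutral for a max reduction
  }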

Added: 
    llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/lib/Target/ARM/ARMTargetTransformInfo.h
    llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index a8e98ebf1540..fa2b52df27bf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -4678,11 +4678,11 @@ SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE(SDNode *N) {
     break;
   case ISD::VECREDUCE_FMAX:
     NeutralElem = DAG.getConstantFP(
-        std::numeric_limits<double>::infinity(), dl, ElemVT);
+        -std::numeric_limits<double>::infinity(), dl, ElemVT);
     break;
   case ISD::VECREDUCE_FMIN:
     NeutralElem = DAG.getConstantFP(
-        -std::numeric_limits<double>::infinity(), dl, ElemVT);
+        std::numeric_limits<double>::infinity(), dl, ElemVT);
     break;
   }
 

diff  --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index c74a104b2fb5..cab1514be4ad 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -176,12 +176,15 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
       // We don't have legalization support for ordered FP reductions.
       if (!II->getFastMathFlags().allowReassoc())
         return true;
-      LLVM_FALLTHROUGH;
+      // Can't legalize reductions with soft floats.
+      return TLI->useSoftFloat() || !TLI->getSubtarget()->hasFPRegs();
 
     case Intrinsic::experimental_vector_reduce_fmin:
     case Intrinsic::experimental_vector_reduce_fmax:
-      // Can't legalize reductions with soft floats.
-      return TLI->useSoftFloat() || !TLI->getSubtarget()->hasFPRegs();
+      // Can't legalize reductions with soft floats, and NoNan will create
+      // fminimum which we do not know how to lower.
+      return TLI->useSoftFloat() || !TLI->getSubtarget()->hasFPRegs() ||
+             !II->getFastMathFlags().noNaNs();
 
     default:
       // Don't expand anything else, let legalization deal with it.

diff  --git a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
index 72c94aaf2d60..975ba2687792 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
@@ -47,7 +47,7 @@ define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
 define float @test_v3f32(<3 x float> %a) nounwind {
 ; CHECK-LABEL: test_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #2139095040
+; CHECK-NEXT:    mov w8, #-8388608
 ; CHECK-NEXT:    fmov s1, w8
 ; CHECK-NEXT:    mov v0.s[3], v1.s[0]
 ; CHECK-NEXT:    fmaxnmv s0, v0.4s

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
new file mode 100644
index 000000000000..26541e6ec2bc
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
@@ -0,0 +1,2264 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp,+fp64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-FP
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16,+fp64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOFP
+
+; FIXME minnum nonan X, +Inf -> X   ?
+define arm_aapcs_vfpcc float @fmin_v2f32(<2 x float> %x) {
+; CHECK-LABEL: fmin_v2f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr s4, .LCPI0_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s1
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    vminnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 2139095040 @ float +Inf
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32(<4 x float> %x) {
+; CHECK-LABEL: fmin_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vminnm.f32 s4, s0, s1
+; CHECK-NEXT:    vminnm.f32 s4, s4, s2
+; CHECK-NEXT:    vminnm.f32 s0, s4, s3
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+  ret float %z
+}
+
+; FIXME fminnum (vector) -> fminnum (scalar)  ?
+define arm_aapcs_vfpcc float @fmin_v8f32(<8 x float> %x) {
+; CHECK-FP-LABEL: fmin_v8f32:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vminnm.f32 s4, s0, s1
+; CHECK-FP-NEXT:    vminnm.f32 s4, s4, s2
+; CHECK-FP-NEXT:    vminnm.f32 s0, s4, s3
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s8, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT:    vminnm.f32 s2, s10, s8
+; CHECK-NOFP-NEXT:    vminnm.f32 s2, s2, s12
+; CHECK-NOFP-NEXT:    vminnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v4f16(<4 x half> %x) {
+; CHECK-LABEL: fmin_v4f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vminnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s1
+; CHECK-NEXT:    vminnm.f16 s4, s4, s1
+; CHECK-NEXT:    vldr.16 s2, .LCPI3_0
+; CHECK-NEXT:    vminnm.f16 s0, s4, s0
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI3_0:
+; CHECK-NEXT:    .short 31744 @ half +Inf
+entry:
+  %z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v8f16(<8 x half> %x) {
+; CHECK-LABEL: fmin_v8f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vmovx.f16 s6, s1
+; CHECK-NEXT:    vminnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vminnm.f16 s4, s4, s1
+; CHECK-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vminnm.f16 s4, s4, s2
+; CHECK-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-NEXT:    vminnm.f16 s4, s4, s3
+; CHECK-NEXT:    vminnm.f16 s0, s4, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v16f16(<16 x half> %x) {
+; CHECK-FP-LABEL: fmin_v16f16:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmovx.f16 s4, s0
+; CHECK-FP-NEXT:    vmovx.f16 s6, s1
+; CHECK-FP-NEXT:    vminnm.f16 s4, s0, s4
+; CHECK-FP-NEXT:    vmovx.f16 s0, s3
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s1
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vmovx.f16 s6, s2
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s2
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s3
+; CHECK-FP-NEXT:    vminnm.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vminnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64(<1 x double> %x) {
+; CHECK-LABEL: fmin_v1f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64(<2 x double> %x) {
+; CHECK-LABEL: fmin_v2f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vminnm.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64(<4 x double> %x) {
+; CHECK-LABEL: fmin_v4f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d3, d1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d2, d0
+; CHECK-NEXT:    vselgt.f64 d4, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vminnm.f64 d0, d0, d4
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+  ret double %z
+}
+
+; FIXME should not be vminnm
+; FIXME better reductions (no vmovs/vdups)
+define arm_aapcs_vfpcc float @fmin_v2f32_nofast(<2 x float> %x) {
+; CHECK-FP-LABEL: fmin_v2f32_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v2f32_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT:    vdup.32 q1, r0
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32_nofast(<4 x float> %x) {
+; CHECK-FP-LABEL: fmin_v4f32_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f32_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s3, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f64 d2, d1
+; CHECK-NOFP-NEXT:    vmov.f32 s5, s3
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s8, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v8f32_nofast(<8 x float> %x) {
+; CHECK-FP-LABEL: fmin_v8f32_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT:    vselgt.f32 s8, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s12, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s2, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s12
+; CHECK-NOFP-NEXT:    vcmp.f32 s2, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v4f16_nofast(<4 x half> %x) {
+; CHECK-FP-LABEL: fmin_v4f16_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f16_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vdup.32 q1, r1
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v8f16_nofast(<8 x half> %x) {
+; CHECK-FP-LABEL: fmin_v8f16_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f16_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmov.f64 d2, d1
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f32 s5, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s3, s1
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmin_v16f16_nofast(<16 x half> %x) {
+; CHECK-FP-LABEL: fmin_v16f16_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s14
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64_nofast(<1 x double> %x) {
+; CHECK-LABEL: fmin_v1f64_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64_nofast(<2 x double> %x) {
+; CHECK-LABEL: fmin_v2f64_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d1, d0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64_nofast(<4 x double> %x) {
+; CHECK-LABEL: fmin_v4f64_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d3, d1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d2, d0
+; CHECK-NEXT:    vselgt.f64 d4, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vcmp.f64 d4, d0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d4
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc float @fmin_v2f32_acc(<2 x float> %x, float %y) {
+; CHECK-LABEL: fmin_v2f32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr s6, .LCPI18_0
+; CHECK-NEXT:    vminnm.f32 s0, s0, s1
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vminnm.f32 s0, s0, s6
+; CHECK-NEXT:    vminnm.f32 s0, s4, s0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .long 2139095040 @ float +Inf
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+  %c = fcmp fast olt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32_acc(<4 x float> %x, float %y) {
+; CHECK-LABEL: fmin_v4f32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vminnm.f32 s6, s0, s1
+; CHECK-NEXT:    vminnm.f32 s6, s6, s2
+; CHECK-NEXT:    vminnm.f32 s0, s6, s3
+; CHECK-NEXT:    vminnm.f32 s0, s4, s0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+  %c = fcmp fast olt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v8f32_acc(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v8f32_acc:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vminnm.f32 s4, s0, s1
+; CHECK-FP-NEXT:    vminnm.f32 s4, s4, s2
+; CHECK-FP-NEXT:    vminnm.f32 s0, s4, s3
+; CHECK-FP-NEXT:    vminnm.f32 s0, s8, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32_acc:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT:    vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT:    vminnm.f32 s2, s12, s10
+; CHECK-NOFP-NEXT:    vminnm.f32 s2, s2, s14
+; CHECK-NOFP-NEXT:    vminnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT:    vminnm.f32 s0, s8, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+  %c = fcmp fast olt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmin_v4f16_acc(<4 x half> %x, half* %yy) {
+; CHECK-LABEL: fmin_v4f16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vminnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s1
+; CHECK-NEXT:    vminnm.f16 s4, s4, s1
+; CHECK-NEXT:    vldr.16 s2, .LCPI21_0
+; CHECK-NEXT:    vminnm.f16 s0, s4, s0
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vminnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI21_0:
+; CHECK-NEXT:    .short 31744 @ half +Inf
+entry:
+  %y = load half, half* %yy
+  %z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+  %c = fcmp fast olt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v8f16_acc(<8 x half> %x, half* %yy) {
+; CHECK-LABEL: fmin_v8f16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vmovx.f16 s6, s1
+; CHECK-NEXT:    vminnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vminnm.f16 s4, s4, s1
+; CHECK-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vminnm.f16 s4, s4, s2
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-NEXT:    vminnm.f16 s4, s4, s3
+; CHECK-NEXT:    vminnm.f16 s0, s4, s0
+; CHECK-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+  %c = fcmp fast olt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v16f16_acc(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v16f16_acc:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmovx.f16 s4, s0
+; CHECK-FP-NEXT:    vmovx.f16 s6, s1
+; CHECK-FP-NEXT:    vminnm.f16 s4, s0, s4
+; CHECK-FP-NEXT:    vmovx.f16 s0, s3
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s1
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vmovx.f16 s6, s2
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s2
+; CHECK-FP-NEXT:    vldr.16 s2, [r0]
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vminnm.f16 s4, s4, s3
+; CHECK-FP-NEXT:    vminnm.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16_acc:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vminnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vminnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT:    vminnm.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+  %c = fcmp fast olt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64_acc(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v1f64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vminnm.f64 d0, d1, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+  %c = fcmp fast olt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64_acc(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v2f64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vminnm.f64 d0, d0, d1
+; CHECK-NEXT:    vminnm.f64 d0, d2, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+  %c = fcmp fast olt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64_acc(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v4f64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d3, d1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d2, d0
+; CHECK-NEXT:    vselgt.f64 d5, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vminnm.f64 d0, d0, d5
+; CHECK-NEXT:    vminnm.f64 d0, d4, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+  %c = fcmp fast olt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v2f32_acc_nofast(<2 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v2f32_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q2, r0
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q2
+; CHECK-FP-NEXT:    vcmp.f32 s0, s4
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v2f32_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT:    vdup.32 q2, r0
+; CHECK-NOFP-NEXT:    vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+  %c = fcmp olt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v4f32_acc_nofast(<4 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v4f32_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d4, d1
+; CHECK-FP-NEXT:    vmov.f32 s9, s3
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q2
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q2, r0
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q2
+; CHECK-FP-NEXT:    vcmp.f32 s0, s4
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f32_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s3, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f64 d4, d1
+; CHECK-NOFP-NEXT:    vmov.f32 s9, s3
+; CHECK-NOFP-NEXT:    vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s6, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f32 s6, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s6
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+  %c = fcmp olt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmin_v8f32_acc_nofast(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmin_v8f32_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vminnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vcmp.f32 s0, s8
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f32 s0, s8, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f32_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s7, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s5, s1
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s10, s12
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s14, s0
+; CHECK-NOFP-NEXT:    vselgt.f32 s2, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s14
+; CHECK-NOFP-NEXT:    vcmp.f32 s2, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s8, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+  %c = fcmp olt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmin_v4f16_acc_nofast(<4 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v4f16_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vldr.16 s4, [r0]
+; CHECK-FP-NEXT:    vcmp.f16 s0, s4
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v4f16_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vdup.32 q1, r1
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s2
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+  %c = fcmp olt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v8f16_acc_nofast(<8 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v8f16_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vldr.16 s4, [r0]
+; CHECK-FP-NEXT:    vcmp.f16 s0, s4
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v8f16_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmov.f64 d2, d1
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f32 s5, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s3, s1
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s2
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+  %c = fcmp olt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmin_v16f16_acc_nofast(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmin_v16f16_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vminnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vldr.16 s4, [r0]
+; CHECK-FP-NEXT:    vcmp.f16 s0, s4
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmin_v16f16_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s14
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s7, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s5, s1
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s6, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s4, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s2
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+  %c = fcmp olt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc double @fmin_v1f64_acc_nofast(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v1f64_acc_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d0, d1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d1, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+  %c = fcmp olt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v2f64_acc_nofast(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v2f64_acc_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d1, d0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d1
+; CHECK-NEXT:    vcmp.f64 d0, d2
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d2, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+  %c = fcmp olt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmin_v4f64_acc_nofast(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmin_v4f64_acc_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d3, d1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d2, d0
+; CHECK-NEXT:    vselgt.f64 d5, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vcmp.f64 d5, d0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d5
+; CHECK-NEXT:    vcmp.f64 d0, d4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d4, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+  %c = fcmp olt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32(<2 x float> %x) {
+; CHECK-LABEL: fmax_v2f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr s4, .LCPI36_0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s4
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI36_0:
+; CHECK-NEXT:    .long 4286578688 @ float -Inf
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32(<4 x float> %x) {
+; CHECK-LABEL: fmax_v4f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmaxnm.f32 s4, s0, s1
+; CHECK-NEXT:    vmaxnm.f32 s4, s4, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s4, s3
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32(<8 x float> %x) {
+; CHECK-FP-LABEL: fmax_v8f32:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmaxnm.f32 s4, s0, s1
+; CHECK-FP-NEXT:    vmaxnm.f32 s4, s4, s2
+; CHECK-FP-NEXT:    vmaxnm.f32 s0, s4, s3
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f32 s8, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT:    vmaxnm.f32 s2, s10, s8
+; CHECK-NOFP-NEXT:    vmaxnm.f32 s2, s2, s12
+; CHECK-NOFP-NEXT:    vmaxnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v4f16(<4 x half> %x) {
+; CHECK-LABEL: fmax_v4f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s1
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT:    vldr.16 s2, .LCPI39_0
+; CHECK-NEXT:    vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI39_0:
+; CHECK-NEXT:    .short 64512 @ half -Inf
+entry:
+  %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v8f16(<8 x half> %x) {
+; CHECK-LABEL: fmax_v8f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vmovx.f16 s6, s1
+; CHECK-NEXT:    vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s2
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s3
+; CHECK-NEXT:    vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v16f16(<16 x half> %x) {
+; CHECK-FP-LABEL: fmax_v16f16:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmovx.f16 s4, s0
+; CHECK-FP-NEXT:    vmovx.f16 s6, s1
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s0, s4
+; CHECK-FP-NEXT:    vmovx.f16 s0, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s1
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vmovx.f16 s6, s2
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s2
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64(<1 x double> %x) {
+; CHECK-LABEL: fmax_v1f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64(<2 x double> %x) {
+; CHECK-LABEL: fmax_v2f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmaxnm.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64(<4 x double> %x) {
+; CHECK-LABEL: fmax_v4f64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d0, d2
+; CHECK-NEXT:    vselgt.f64 d4, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vmaxnm.f64 d0, d0, d4
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32_nofast(<2 x float> %x) {
+; CHECK-FP-LABEL: fmax_v2f32_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v2f32_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT:    vdup.32 q1, r0
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32_nofast(<4 x float> %x) {
+; CHECK-FP-LABEL: fmax_v4f32_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f32_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f64 d2, d1
+; CHECK-NOFP-NEXT:    vmov.f32 s5, s3
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f32 s8, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32_nofast(<8 x float> %x) {
+; CHECK-FP-LABEL: fmax_v8f32_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    @ kill: def $s0 killed $s0 killed $q0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT:    vselgt.f32 s8, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s10, s8
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s12
+; CHECK-NOFP-NEXT:    vselgt.f32 s2, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s12
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s2
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+  ret float %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v4f16_nofast(<4 x half> %x) {
+; CHECK-FP-LABEL: fmax_v4f16_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f16_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vdup.32 q1, r1
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v8f16_nofast(<8 x half> %x) {
+; CHECK-FP-LABEL: fmax_v8f16_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f16_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmov.f64 d2, d1
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f32 s5, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s1, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc half @fmax_v16f16_nofast(<16 x half> %x) {
+; CHECK-FP-LABEL: fmax_v16f16_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s14, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+  ret half %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64_nofast(<1 x double> %x) {
+; CHECK-LABEL: fmax_v1f64_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64_nofast(<2 x double> %x) {
+; CHECK-LABEL: fmax_v2f64_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d0, d1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d1
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64_nofast(<4 x double> %x) {
+; CHECK-LABEL: fmax_v4f64_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d0, d2
+; CHECK-NEXT:    vselgt.f64 d4, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vcmp.f64 d0, d4
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d4
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+  ret double %z
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32_acc(<2 x float> %x, float %y) {
+; CHECK-LABEL: fmax_v2f32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldr s6, .LCPI54_0
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s1
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmaxnm.f32 s0, s0, s6
+; CHECK-NEXT:    vmaxnm.f32 s0, s4, s0
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI54_0:
+; CHECK-NEXT:    .long 4286578688 @ float -Inf
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+  %c = fcmp fast ogt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32_acc(<4 x float> %x, float %y) {
+; CHECK-LABEL: fmax_v4f32_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmaxnm.f32 s6, s0, s1
+; CHECK-NEXT:    vmaxnm.f32 s6, s6, s2
+; CHECK-NEXT:    vmaxnm.f32 s0, s6, s3
+; CHECK-NEXT:    vmaxnm.f32 s0, s4, s0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+  %c = fcmp fast ogt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32_acc(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v8f32_acc:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmaxnm.f32 s4, s0, s1
+; CHECK-FP-NEXT:    vmaxnm.f32 s4, s4, s2
+; CHECK-FP-NEXT:    vmaxnm.f32 s0, s4, s3
+; CHECK-FP-NEXT:    vmaxnm.f32 s0, s8, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32_acc:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT:    vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s3, s7
+; CHECK-NOFP-NEXT:    vmaxnm.f32 s2, s12, s10
+; CHECK-NOFP-NEXT:    vmaxnm.f32 s2, s2, s14
+; CHECK-NOFP-NEXT:    vmaxnm.f32 s0, s2, s0
+; CHECK-NOFP-NEXT:    vmaxnm.f32 s0, s8, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+  %c = fcmp fast ogt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmax_v4f16_acc(<4 x half> %x, half* %yy) {
+; CHECK-LABEL: fmax_v4f16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s1
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT:    vldr.16 s2, .LCPI57_0
+; CHECK-NEXT:    vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vmaxnm.f16 s0, s0, s2
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 1
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI57_0:
+; CHECK-NEXT:    .short 64512 @ half -Inf
+entry:
+  %y = load half, half* %yy
+  %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+  %c = fcmp fast ogt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v8f16_acc(<8 x half> %x, half* %yy) {
+; CHECK-LABEL: fmax_v8f16_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovx.f16 s4, s0
+; CHECK-NEXT:    vmovx.f16 s6, s1
+; CHECK-NEXT:    vmaxnm.f16 s4, s0, s4
+; CHECK-NEXT:    vmovx.f16 s0, s3
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s1
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT:    vmovx.f16 s6, s2
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s2
+; CHECK-NEXT:    vldr.16 s2, [r0]
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-NEXT:    vmaxnm.f16 s4, s4, s3
+; CHECK-NEXT:    vmaxnm.f16 s0, s4, s0
+; CHECK-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NEXT:    vstr.16 s0, [r0]
+; CHECK-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+  %c = fcmp fast ogt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v16f16_acc(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v16f16_acc:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmovx.f16 s4, s0
+; CHECK-FP-NEXT:    vmovx.f16 s6, s1
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s0, s4
+; CHECK-FP-NEXT:    vmovx.f16 s0, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s1
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vmovx.f16 s6, s2
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s2
+; CHECK-FP-NEXT:    vldr.16 s2, [r0]
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s6
+; CHECK-FP-NEXT:    vmaxnm.f16 s4, s4, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16_acc:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s4, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s0, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s5
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s2, s6
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s8, s8, s10
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s0, s8, s0
+; CHECK-NOFP-NEXT:    vmaxnm.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+  %c = fcmp fast ogt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64_acc(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v1f64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmaxnm.f64 d0, d1, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+  %c = fcmp fast ogt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64_acc(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v2f64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmaxnm.f64 d0, d0, d1
+; CHECK-NEXT:    vmaxnm.f64 d0, d2, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+  %c = fcmp fast ogt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64_acc(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v4f64_acc:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d0, d2
+; CHECK-NEXT:    vselgt.f64 d5, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vmaxnm.f64 d0, d0, d5
+; CHECK-NEXT:    vmaxnm.f64 d0, d4, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+  %c = fcmp fast ogt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v2f32_acc_nofast(<2 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v2f32_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q2, r0
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q2
+; CHECK-FP-NEXT:    vcmp.f32 s4, s0
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v2f32_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-NOFP-NEXT:    vdup.32 q2, r0
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+  %c = fcmp ogt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v4f32_acc_nofast(<4 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v4f32_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d4, d1
+; CHECK-FP-NEXT:    vmov.f32 s9, s3
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q2
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q2, r0
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q2
+; CHECK-FP-NEXT:    vcmp.f32 s4, s0
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f32_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f64 d4, d1
+; CHECK-NOFP-NEXT:    vmov.f32 s9, s3
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s8
+; CHECK-NOFP-NEXT:    vselgt.f32 s6, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s6
+; CHECK-NOFP-NEXT:    vcmp.f32 s4, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s4, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+  %c = fcmp ogt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc float @fmax_v8f32_acc_nofast(<8 x float> %x, float %y) {
+; CHECK-FP-LABEL: fmax_v8f32_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r0, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r0
+; CHECK-FP-NEXT:    vmaxnm.f32 q0, q0, q1
+; CHECK-FP-NEXT:    vcmp.f32 s8, s0
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f32 s0, s8, s0
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f32_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vcmp.f32 s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s1, s5
+; CHECK-NOFP-NEXT:    vselgt.f32 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f32 s12, s1, s5
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f32 s14, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s12, s10
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s4
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s14
+; CHECK-NOFP-NEXT:    vselgt.f32 s2, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s14
+; CHECK-NOFP-NEXT:    vcmp.f32 s0, s2
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s0, s2
+; CHECK-NOFP-NEXT:    vcmp.f32 s8, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f32 s0, s8, s0
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+  %c = fcmp ogt float %y, %z
+  %r = select i1 %c, float %y, float %z
+  ret float %r
+}
+
+define arm_aapcs_vfpcc void @fmax_v4f16_acc_nofast(<4 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v4f16_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vldr.16 s4, [r0]
+; CHECK-FP-NEXT:    vcmp.f16 s4, s0
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v4f16_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s0
+; CHECK-NOFP-NEXT:    vdup.32 q1, r1
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f16 s2, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+  %c = fcmp ogt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v8f16_acc_nofast(<8 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v8f16_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vldr.16 s4, [r0]
+; CHECK-FP-NEXT:    vcmp.f16 s4, s0
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v8f16_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s3
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s1
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmov.f64 d2, d1
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmov.f32 s5, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s1, s3
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s1, s3
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f16 s2, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+  %c = fcmp ogt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc void @fmax_v16f16_acc_nofast(<16 x half> %x, half* %yy) {
+; CHECK-FP-LABEL: fmax_v16f16_acc_nofast:
+; CHECK-FP:       @ %bb.0: @ %entry
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.f64 d2, d1
+; CHECK-FP-NEXT:    vmov.f32 s5, s3
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.32 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.32 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vmov.u16 r1, q0[1]
+; CHECK-FP-NEXT:    vdup.16 q1, r1
+; CHECK-FP-NEXT:    vmaxnm.f16 q0, q0, q1
+; CHECK-FP-NEXT:    vldr.16 s4, [r0]
+; CHECK-FP-NEXT:    vcmp.f16 s4, s0
+; CHECK-FP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-FP-NEXT:    vselgt.f16 s0, s4, s0
+; CHECK-FP-NEXT:    vstr.16 s0, [r0]
+; CHECK-FP-NEXT:    bx lr
+;
+; CHECK-NOFP-LABEL: fmax_v16f16_acc_nofast:
+; CHECK-NOFP:       @ %bb.0: @ %entry
+; CHECK-NOFP-NEXT:    vmovx.f16 s8, s7
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s3
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s1
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vmovx.f16 s14, s0
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s2
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmovx.f16 s10, s6
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmovx.f16 s12, s4
+; CHECK-NOFP-NEXT:    vcmp.f16 s14, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s14, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s3, s7
+; CHECK-NOFP-NEXT:    vselgt.f16 s8, s10, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s1, s5
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s3, s7
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s1, s5
+; CHECK-NOFP-NEXT:    vcmp.f16 s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s2, s6
+; CHECK-NOFP-NEXT:    vselgt.f16 s10, s12, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s4
+; CHECK-NOFP-NEXT:    vselgt.f16 s12, s2, s6
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s4
+; CHECK-NOFP-NEXT:    vldr.16 s2, [r0]
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s12
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s12
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s10
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s10
+; CHECK-NOFP-NEXT:    vcmp.f16 s0, s8
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s0, s8
+; CHECK-NOFP-NEXT:    vcmp.f16 s2, s0
+; CHECK-NOFP-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NOFP-NEXT:    vselgt.f16 s0, s2, s0
+; CHECK-NOFP-NEXT:    vstr.16 s0, [r0]
+; CHECK-NOFP-NEXT:    bx lr
+entry:
+  %y = load half, half* %yy
+  %z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+  %c = fcmp ogt half %y, %z
+  %r = select i1 %c, half %y, half %z
+  store half %r, half* %yy
+  ret void
+}
+
+define arm_aapcs_vfpcc double @fmax_v1f64_acc_nofast(<1 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v1f64_acc_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d1, d0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d1, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+  %c = fcmp ogt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v2f64_acc_nofast(<2 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v2f64_acc_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d0, d1
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d1
+; CHECK-NEXT:    vcmp.f64 d2, d0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d2, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+  %c = fcmp ogt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+define arm_aapcs_vfpcc double @fmax_v4f64_acc_nofast(<4 x double> %x, double %y) {
+; CHECK-LABEL: fmax_v4f64_acc_nofast:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcmp.f64 d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vcmp.f64 d0, d2
+; CHECK-NEXT:    vselgt.f64 d5, d1, d3
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d2
+; CHECK-NEXT:    vcmp.f64 d0, d5
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d0, d5
+; CHECK-NEXT:    vcmp.f64 d4, d0
+; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
+; CHECK-NEXT:    vselgt.f64 d0, d4, d0
+; CHECK-NEXT:    bx lr
+entry:
+  %z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+  %c = fcmp ogt double %y, %z
+  %r = select i1 %c, double %y, double %z
+  ret double %r
+}
+
+declare double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double>)
+declare double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double>)
+declare double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double>)
+declare double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double>)
+declare double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double>)
+declare double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double>)
+declare float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float>)
+declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)
+declare float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float>)
+declare float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float>)
+declare float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float>)
+declare float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float>)
+declare half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half>)
+declare half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half>)
+declare half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half>)
+declare half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half>)
+declare half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half>)
+declare half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half>)
More information about the llvm-commits mailing list