[llvm] 70d345e - [AArch64][ARM] Always expand ordered vector reductions (PR44600)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 30 09:40:35 PST 2020


Author: Nikita Popov
Date: 2020-01-30T18:40:24+01:00
New Revision: 70d345e687caba4ac1f95655c6924dfa91e0083f

URL: https://github.com/llvm/llvm-project/commit/70d345e687caba4ac1f95655c6924dfa91e0083f
DIFF: https://github.com/llvm/llvm-project/commit/70d345e687caba4ac1f95655c6924dfa91e0083f.diff

LOG: [AArch64][ARM] Always expand ordered vector reductions (PR44600)

fadd/fmul reductions without the reassoc flag are lowered to
VECREDUCE_STRICT_FADD/FMUL nodes, which don't have legalization
support yet. Until that is in place, expand these intrinsics on
ARM and AArch64. All other targets already expand the vector
reduction intrinsics unconditionally.
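
As an illustration, a call like the following carries no fast-math
flags, so allowReassoc() is false and the reduction is now expanded
at the IR level instead of reaching the SelectionDAG (the function
name is just for the example):

  declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float, <4 x float>)

  ; No fast-math flags: an ordered reduction, expanded before ISel.
  define float @ordered_fadd(<4 x float> %v) {
    %r = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.0, <4 x float> %v)
    ret float %r
  }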

Additionally, expand fmax/fmin reductions without the nnan flag on
AArch64, as the backend asserts that the flag is present when
lowering VECREDUCE_FMIN/FMAX.
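
For example (again with an illustrative function name), the call
below has no nnan flag, so it is now expanded rather than lowered
to VECREDUCE_FMAX:

  declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)

  define float @fmax_no_nnan(<4 x float> %v) {
    ; noNaNs() is false here, so shouldExpandReduction() returns true.
    %r = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %v)
    ret float %r
  }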

This fixes https://bugs.llvm.org/show_bug.cgi?id=44600.

Differential Revision: https://reviews.llvm.org/D73135

Added: 
    llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
    llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization-nan.ll
    llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
    llvm/test/CodeGen/ARM/vecreduce-fadd-legalization-strict.ll
    llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
    llvm/lib/Target/ARM/ARMTargetTransformInfo.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index b143b45e57ab..2f07448acc10 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -201,7 +201,21 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
                                      bool &AllowPromotionWithoutCommonHeader);
 
   bool shouldExpandReduction(const IntrinsicInst *II) const {
-    return false;
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::experimental_vector_reduce_v2_fadd:
+    case Intrinsic::experimental_vector_reduce_v2_fmul:
+      // We don't have legalization support for ordered FP reductions.
+      return !II->getFastMathFlags().allowReassoc();
+
+    case Intrinsic::experimental_vector_reduce_fmax:
+    case Intrinsic::experimental_vector_reduce_fmin:
+      // Lowering asserts that there are no NaNs.
+      return !II->getFastMathFlags().noNaNs();
+
+    default:
+      // Don't expand anything else, let legalization deal with it.
+      return false;
+    }
   }
 
   unsigned getGISelRematGlobalCost() const {

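Once shouldExpandReduction() returns true, the IR-level
ExpandReductions pass scalarizes the call before instruction
selection. For an ordered <4 x float> fadd reduction with start
value %acc, the result is, roughly, a sequential extract/fadd chain
(a sketch of the expected expansion, not verbatim pass output):

  %e0 = extractelement <4 x float> %v, i32 0
  %a0 = fadd float %acc, %e0
  %e1 = extractelement <4 x float> %v, i32 1
  %a1 = fadd float %a0, %e1
  %e2 = extractelement <4 x float> %v, i32 2
  %a2 = fadd float %a1, %e2
  %e3 = extractelement <4 x float> %v, i32 3
  %a3 = fadd float %a2, %e3

This matches the strictly sequential fadd/fmul chains in the CHECK
lines of the new tests below.
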
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 2c35e4131baf..4f7aaaab0d31 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -170,7 +170,16 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
                              TTI::ReductionFlags Flags) const;
 
   bool shouldExpandReduction(const IntrinsicInst *II) const {
-    return false;
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::experimental_vector_reduce_v2_fadd:
+    case Intrinsic::experimental_vector_reduce_v2_fmul:
+      // We don't have legalization support for ordered FP reductions.
+      return !II->getFastMathFlags().allowReassoc();
+
+    default:
+      // Don't expand anything else, let legalization deal with it.
+      return false;
+    }
   }
 
   int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
new file mode 100644
index 000000000000..5d6f2e40d4d5
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
@@ -0,0 +1,128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+; Same as vecreduce-fadd-legalization.ll, but without fmf.
+
+declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half, <1 x half>)
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float, <1 x float>)
+declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double, <1 x double>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128, <1 x fp128>)
+
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float, <3 x float>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128, <2 x fp128>)
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float, <16 x float>)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    fcvt h0, s0
+; CHECK-NEXT:    ret
+  %b = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half 0.0, <1 x half> %a)
+  ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float 0.0, <1 x float> %a)
+  ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d1, xzr
+; CHECK-NEXT:    fadd d0, d0, d1
+; CHECK-NEXT:    ret
+  %b = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double 0.0, <1 x double> %a)
+  ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    mov s2, v0.s[1]
+; CHECK-NEXT:    fadd s1, s0, s1
+; CHECK-NEXT:    fadd s1, s1, s2
+; CHECK-NEXT:    mov s0, v0.s[2]
+; CHECK-NEXT:    fadd s0, s1, s0
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float 0.0, <3 x float> %a)
+  ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    adrp x8, .LCPI5_0
+; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s4, wzr
+; CHECK-NEXT:    mov s5, v0.s[1]
+; CHECK-NEXT:    fadd s4, s0, s4
+; CHECK-NEXT:    fadd s4, s4, s5
+; CHECK-NEXT:    mov s5, v0.s[2]
+; CHECK-NEXT:    mov s0, v0.s[3]
+; CHECK-NEXT:    fadd s4, s4, s5
+; CHECK-NEXT:    fadd s0, s4, s0
+; CHECK-NEXT:    mov s5, v1.s[1]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov s4, v1.s[2]
+; CHECK-NEXT:    fadd s0, s0, s5
+; CHECK-NEXT:    mov s1, v1.s[3]
+; CHECK-NEXT:    fadd s0, s0, s4
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov s5, v2.s[1]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    mov s4, v2.s[2]
+; CHECK-NEXT:    fadd s0, s0, s5
+; CHECK-NEXT:    mov s1, v2.s[3]
+; CHECK-NEXT:    fadd s0, s0, s4
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    mov s2, v3.s[1]
+; CHECK-NEXT:    fadd s0, s0, s3
+; CHECK-NEXT:    mov s5, v3.s[2]
+; CHECK-NEXT:    fadd s0, s0, s2
+; CHECK-NEXT:    fadd s0, s0, s5
+; CHECK-NEXT:    mov s1, v3.s[3]
+; CHECK-NEXT:    fadd s0, s0, s1
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float 0.0, <16 x float> %a)
+  ret float %b
+}

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization-nan.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization-nan.ll
new file mode 100644
index 000000000000..c669a55519d8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization-nan.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare half @llvm.experimental.vector.reduce.fmax.v1f16(<1 x half> %a)
+declare float @llvm.experimental.vector.reduce.fmax.v1f32(<1 x float> %a)
+declare double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %a)
+declare fp128 @llvm.experimental.vector.reduce.fmax.v1f128(<1 x fp128> %a)
+
+declare float @llvm.experimental.vector.reduce.fmax.v3f32(<3 x float> %a)
+declare fp128 @llvm.experimental.vector.reduce.fmax.v2f128(<2 x fp128> %a)
+declare float @llvm.experimental.vector.reduce.fmax.v16f32(<16 x float> %a)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %b = call half @llvm.experimental.vector.reduce.fmax.v1f16(<1 x half> %a)
+  ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.fmax.v1f32(<1 x float> %a)
+  ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %b = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %a)
+  ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.fmax.v1f128(<1 x fp128> %a)
+  ret fp128 %b
+}
+
+; TODO: This doesn't work, because ExpandReductions only supports power of two
+; unordered reductions.
+;define float @test_v3f32(<3 x float> %a) nounwind {
+;  %b = call float @llvm.experimental.vector.reduce.fmax.v3f32(<3 x float> %a)
+;  ret float %b
+;}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __gttf2
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    b.le .LBB4_2
+; CHECK-NEXT:  // %bb.1:
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:  .LBB4_2:
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.fmax.v2f128(<2 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmaxnm v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    dup v1.4s, v0.s[1]
+; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.fmax.v16f32(<16 x float> %a)
+  ret float %b
+}

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
new file mode 100644
index 000000000000..41914ca73afa
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+; Same as vecreduce-fmul-legalization.ll, but without fmf.
+
+declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half, <1 x half>)
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float, <1 x float>)
+declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double, <1 x double>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128, <1 x fp128>)
+
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float, <3 x float>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128, <2 x fp128>)
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float, <16 x float>)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    fmul s0, s0, s1
+; CHECK-NEXT:    fcvt h0, s0
+; CHECK-NEXT:    ret
+  %b = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half 0.0, <1 x half> %a)
+  ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    fmul s0, s1, v0.s[0]
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float 0.0, <1 x float> %a)
+  ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d1, xzr
+; CHECK-NEXT:    fmul d0, d0, d1
+; CHECK-NEXT:    ret
+  %b = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double 0.0, <1 x double> %a)
+  ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s1, wzr
+; CHECK-NEXT:    fmul s1, s1, v0.s[0]
+; CHECK-NEXT:    fmul s1, s1, v0.s[1]
+; CHECK-NEXT:    fmul s0, s1, v0.s[2]
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float 0.0, <3 x float> %a)
+  ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    adrp x8, .LCPI5_0
+; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI5_0]
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    ret
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s4, wzr
+; CHECK-NEXT:    fmul s4, s4, v0.s[0]
+; CHECK-NEXT:    fmul s4, s4, v0.s[1]
+; CHECK-NEXT:    fmul s4, s4, v0.s[2]
+; CHECK-NEXT:    fmul s0, s4, v0.s[3]
+; CHECK-NEXT:    fmul s0, s0, v1.s[0]
+; CHECK-NEXT:    fmul s0, s0, v1.s[1]
+; CHECK-NEXT:    fmul s0, s0, v1.s[2]
+; CHECK-NEXT:    fmul s0, s0, v1.s[3]
+; CHECK-NEXT:    fmul s0, s0, v2.s[0]
+; CHECK-NEXT:    fmul s0, s0, v2.s[1]
+; CHECK-NEXT:    fmul s0, s0, v2.s[2]
+; CHECK-NEXT:    fmul s0, s0, v2.s[3]
+; CHECK-NEXT:    fmul s0, s0, v3.s[0]
+; CHECK-NEXT:    fmul s0, s0, v3.s[1]
+; CHECK-NEXT:    fmul s0, s0, v3.s[2]
+; CHECK-NEXT:    fmul s0, s0, v3.s[3]
+; CHECK-NEXT:    ret
+  %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float 0.0, <16 x float> %a)
+  ret float %b
+}

diff --git a/llvm/test/CodeGen/ARM/vecreduce-fadd-legalization-strict.ll b/llvm/test/CodeGen/ARM/vecreduce-fadd-legalization-strict.ll
new file mode 100644
index 000000000000..f2f4ee28966b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/vecreduce-fadd-legalization-strict.ll
@@ -0,0 +1,166 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half, <1 x half>)
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float, <1 x float>)
+declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double, <1 x double>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128, <1 x fp128>)
+
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float, <3 x float>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128, <2 x fp128>)
+declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float, <16 x float>)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl __aeabi_f2h
+; CHECK-NEXT:    bl __aeabi_h2f
+; CHECK-NEXT:    vldr s0, .LCPI0_0
+; CHECK-NEXT:    vmov s2, r0
+; CHECK-NEXT:    vadd.f32 s0, s2, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v1f16(half 0.0, <1 x half> %a)
+  ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s0, .LCPI1_0
+; CHECK-NEXT:    vmov s2, r0
+; CHECK-NEXT:    vadd.f32 s0, s2, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI1_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v1f32(float 0.0, <1 x float> %a)
+  ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 d16, #0x0
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vadd.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %b = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double 0.0, <1 x double> %a)
+  ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    mov r12, #0
+; CHECK-NEXT:    str r12, [sp]
+; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    str r12, [sp, #8]
+; CHECK-NEXT:    str r12, [sp, #12]
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d3, r2, r3
+; CHECK-NEXT:    vldr s0, .LCPI4_0
+; CHECK-NEXT:    vmov d2, r0, r1
+; CHECK-NEXT:    vadd.f32 s0, s4, s0
+; CHECK-NEXT:    vadd.f32 s0, s0, s5
+; CHECK-NEXT:    vadd.f32 s0, s0, s6
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v3f32(float 0.0, <3 x float> %a)
+  ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    mov r12, #0
+; CHECK-NEXT:    str r12, [sp]
+; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    str r12, [sp, #8]
+; CHECK-NEXT:    str r12, [sp, #12]
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    ldr r12, [sp, #36]
+; CHECK-NEXT:    ldr lr, [sp, #32]
+; CHECK-NEXT:    ldr r4, [sp, #40]
+; CHECK-NEXT:    ldr r5, [sp, #44]
+; CHECK-NEXT:    str lr, [sp]
+; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    str r4, [sp, #8]
+; CHECK-NEXT:    str r5, [sp, #12]
+; CHECK-NEXT:    bl __addtf3
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    pop {r4, r5, r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d3, r2, r3
+; CHECK-NEXT:    vldr s0, .LCPI6_0
+; CHECK-NEXT:    vmov d2, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vadd.f32 s0, s4, s0
+; CHECK-NEXT:    vadd.f32 s0, s0, s5
+; CHECK-NEXT:    vadd.f32 s0, s0, s6
+; CHECK-NEXT:    vadd.f32 s0, s0, s7
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT:    add r0, sp, #16
+; CHECK-NEXT:    vadd.f32 s0, s0, s4
+; CHECK-NEXT:    vadd.f32 s0, s0, s5
+; CHECK-NEXT:    vadd.f32 s0, s0, s6
+; CHECK-NEXT:    vadd.f32 s0, s0, s7
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT:    add r0, sp, #32
+; CHECK-NEXT:    vadd.f32 s0, s0, s4
+; CHECK-NEXT:    vadd.f32 s0, s0, s5
+; CHECK-NEXT:    vadd.f32 s0, s0, s6
+; CHECK-NEXT:    vadd.f32 s0, s0, s7
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT:    vadd.f32 s0, s0, s4
+; CHECK-NEXT:    vadd.f32 s0, s0, s5
+; CHECK-NEXT:    vadd.f32 s0, s0, s6
+; CHECK-NEXT:    vadd.f32 s0, s0, s7
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v16f32(float 0.0, <16 x float> %a)
+  ret float %b
+}

diff --git a/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll
new file mode 100644
index 000000000000..396cbc806e53
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll
@@ -0,0 +1,166 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+neon | FileCheck %s --check-prefix=CHECK
+
+declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half, <1 x half>)
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float, <1 x float>)
+declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double, <1 x double>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128, <1 x fp128>)
+
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float, <3 x float>)
+declare fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128, <2 x fp128>)
+declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float, <16 x float>)
+
+define half @test_v1f16(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl __aeabi_f2h
+; CHECK-NEXT:    bl __aeabi_h2f
+; CHECK-NEXT:    vldr s0, .LCPI0_0
+; CHECK-NEXT:    vmov s2, r0
+; CHECK-NEXT:    vmul.f32 s0, s2, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI0_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v1f16(half 0.0, <1 x half> %a)
+  ret half %b
+}
+
+define float @test_v1f32(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vldr s0, .LCPI1_0
+; CHECK-NEXT:    vmov s2, r0
+; CHECK-NEXT:    vmul.f32 s0, s2, s0
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI1_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v1f32(float 0.0, <1 x float> %a)
+  ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.i32 d16, #0x0
+; CHECK-NEXT:    vmov d17, r0, r1
+; CHECK-NEXT:    vmul.f64 d16, d17, d16
+; CHECK-NEXT:    vmov r0, r1, d16
+; CHECK-NEXT:    mov pc, lr
+  %b = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double 0.0, <1 x double> %a)
+  ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    mov r12, #0
+; CHECK-NEXT:    str r12, [sp]
+; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    str r12, [sp, #8]
+; CHECK-NEXT:    str r12, [sp, #12]
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v3f32(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d3, r2, r3
+; CHECK-NEXT:    vldr s0, .LCPI4_0
+; CHECK-NEXT:    vmov d2, r0, r1
+; CHECK-NEXT:    vmul.f32 s0, s4, s0
+; CHECK-NEXT:    vmul.f32 s0, s0, s5
+; CHECK-NEXT:    vmul.f32 s0, s0, s6
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI4_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v3f32(float 0.0, <3 x float> %a)
+  ret float %b
+}
+
+define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    mov r12, #0
+; CHECK-NEXT:    str r12, [sp]
+; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    str r12, [sp, #8]
+; CHECK-NEXT:    str r12, [sp, #12]
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    ldr r12, [sp, #36]
+; CHECK-NEXT:    ldr lr, [sp, #32]
+; CHECK-NEXT:    ldr r4, [sp, #40]
+; CHECK-NEXT:    ldr r5, [sp, #44]
+; CHECK-NEXT:    str lr, [sp]
+; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    str r4, [sp, #8]
+; CHECK-NEXT:    str r5, [sp, #12]
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    pop {r4, r5, r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %b = call fp128 @llvm.experimental.vector.reduce.v2.fmul.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+  ret fp128 %b
+}
+
+define float @test_v16f32(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov d3, r2, r3
+; CHECK-NEXT:    vldr s0, .LCPI6_0
+; CHECK-NEXT:    vmov d2, r0, r1
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vmul.f32 s0, s4, s0
+; CHECK-NEXT:    vmul.f32 s0, s0, s5
+; CHECK-NEXT:    vmul.f32 s0, s0, s6
+; CHECK-NEXT:    vmul.f32 s0, s0, s7
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT:    add r0, sp, #16
+; CHECK-NEXT:    vmul.f32 s0, s0, s4
+; CHECK-NEXT:    vmul.f32 s0, s0, s5
+; CHECK-NEXT:    vmul.f32 s0, s0, s6
+; CHECK-NEXT:    vmul.f32 s0, s0, s7
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT:    add r0, sp, #32
+; CHECK-NEXT:    vmul.f32 s0, s0, s4
+; CHECK-NEXT:    vmul.f32 s0, s0, s5
+; CHECK-NEXT:    vmul.f32 s0, s0, s6
+; CHECK-NEXT:    vmul.f32 s0, s0, s7
+; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT:    vmul.f32 s0, s0, s4
+; CHECK-NEXT:    vmul.f32 s0, s0, s5
+; CHECK-NEXT:    vmul.f32 s0, s0, s6
+; CHECK-NEXT:    vmul.f32 s0, s0, s7
+; CHECK-NEXT:    vmov r0, s0
+; CHECK-NEXT:    mov pc, lr
+; CHECK-NEXT:    .p2align 2
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 0 @ float 0
+  %b = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v16f32(float 0.0, <16 x float> %a)
+  ret float %b
+}
