[llvm] c126eb7 - [SelectionDAG] Add legalizations for VECREDUCE_SEQ_FMUL

Cameron McInally via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 4 12:20:47 PST 2020


Author: Cameron McInally
Date: 2020-11-04T14:20:31-06:00
New Revision: c126eb7529bef379f5609df162de13f7916ae991

URL: https://github.com/llvm/llvm-project/commit/c126eb7529bef379f5609df162de13f7916ae991
DIFF: https://github.com/llvm/llvm-project/commit/c126eb7529bef379f5609df162de13f7916ae991.diff

LOG: [SelectionDAG] Add legalizations for VECREDUCE_SEQ_FMUL

Hook up legalizations for VECREDUCE_SEQ_FMUL. This is following up on the VECREDUCE_SEQ_FADD work from D90247.

Differential Revision: https://reviews.llvm.org/D90644

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/lib/CodeGen/TargetLoweringBase.cpp
    llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
    llvm/lib/Target/ARM/ARMTargetTransformInfo.h
    llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
    llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-soft-float.ll
    llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 1377a25d6a7e..4f58f6deb456 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -141,6 +141,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
       R = SoftenFloatRes_VECREDUCE(N);
       break;
     case ISD::VECREDUCE_SEQ_FADD:
+    case ISD::VECREDUCE_SEQ_FMUL:
       R = SoftenFloatRes_VECREDUCE_SEQ(N);
       break;
   }
@@ -2261,6 +2262,7 @@ void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
       R = PromoteFloatRes_VECREDUCE(N);
       break;
     case ISD::VECREDUCE_SEQ_FADD:
+    case ISD::VECREDUCE_SEQ_FMUL:
       R = PromoteFloatRes_VECREDUCE_SEQ(N);
       break;
   }
@@ -2623,6 +2625,7 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
     R = SoftPromoteHalfRes_VECREDUCE(N);
     break;
   case ISD::VECREDUCE_SEQ_FADD:
+  case ISD::VECREDUCE_SEQ_FMUL:
     R = SoftPromoteHalfRes_VECREDUCE_SEQ(N);
     break;
   }

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 0869f618dd35..c356895c61a7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -490,6 +490,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
                                     Node->getOperand(0).getValueType());
     break;
   case ISD::VECREDUCE_SEQ_FADD:
+  case ISD::VECREDUCE_SEQ_FMUL:
     Action = TLI.getOperationAction(Node->getOpcode(),
                                     Node->getOperand(1).getValueType());
     break;
@@ -875,6 +876,7 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
     Results.push_back(TLI.expandVecReduce(Node, DAG));
     return;
   case ISD::VECREDUCE_SEQ_FADD:
+  case ISD::VECREDUCE_SEQ_FMUL:
     Results.push_back(TLI.expandVecReduceSeq(Node, DAG));
     return;
   case ISD::SREM:

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index e8186a1ee543..606b38d48c52 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -624,6 +624,7 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
       Res = ScalarizeVecOp_VECREDUCE(N);
       break;
     case ISD::VECREDUCE_SEQ_FADD:
+    case ISD::VECREDUCE_SEQ_FMUL:
       Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
       break;
     }
@@ -2090,6 +2091,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
       Res = SplitVecOp_VECREDUCE(N, OpNo);
       break;
     case ISD::VECREDUCE_SEQ_FADD:
+    case ISD::VECREDUCE_SEQ_FMUL:
       Res = SplitVecOp_VECREDUCE_SEQ(N);
       break;
     }
@@ -4358,6 +4360,7 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
     Res = WidenVecOp_VECREDUCE(N);
     break;
   case ISD::VECREDUCE_SEQ_FADD:
+  case ISD::VECREDUCE_SEQ_FMUL:
     Res = WidenVecOp_VECREDUCE_SEQ(N);
     break;
   }

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 4180fbf45221..b9039eed888d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -341,6 +341,7 @@ ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
   case ISD::VECREDUCE_SEQ_FADD:
     return ISD::FADD;
   case ISD::VECREDUCE_FMUL:
+  case ISD::VECREDUCE_SEQ_FMUL:
     return ISD::FMUL;
   case ISD::VECREDUCE_ADD:
     return ISD::ADD;

diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index a61574546367..89cd0584017b 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -734,6 +734,7 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
     setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
     setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
+    setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);
   }
 
   // Most targets ignore the @llvm.prefetch intrinsic.

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 0d81ca1fbdd8..a624f8bfd745 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -221,17 +221,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   shouldConsiderAddressTypePromotion(const Instruction &I,
                                      bool &AllowPromotionWithoutCommonHeader);
 
-  bool shouldExpandReduction(const IntrinsicInst *II) const {
-    switch (II->getIntrinsicID()) {
-    case Intrinsic::vector_reduce_fmul:
-      // We don't have legalization support for ordered FMUL reductions.
-      return !II->getFastMathFlags().allowReassoc();
-
-    default:
-      // Don't expand anything else, let legalization deal with it.
-      return false;
-    }
-  }
+  bool shouldExpandReduction(const IntrinsicInst *II) const { return false; }
 
   unsigned getGISelRematGlobalCost() const {
     return 2;

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 3898272ed168..a0f4a157387c 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -195,16 +195,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                        TTI::ReductionFlags Flags) const;
 
-  bool shouldExpandReduction(const IntrinsicInst *II) const {
-    switch (II->getIntrinsicID()) {
-    case Intrinsic::vector_reduce_fmul:
-      // We don't have legalization support for ordered FMUL reductions.
-      return !II->getFastMathFlags().allowReassoc();
-    default:
-      // Don't expand anything else, let legalization deal with it.
-      return false;
-    }
-  }
+  bool shouldExpandReduction(const IntrinsicInst *II) const { return false; }
 
   int getCFInstrCost(unsigned Opcode,
                      TTI::TargetCostKind CostKind);

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
index be661127f066..d6efeda596e0 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul-legalization-strict.ll
@@ -15,12 +15,8 @@ declare float @llvm.vector.reduce.fmul.f32.v16f32(float, <16 x float>)
 define half @test_v1f16(<1 x half> %a) nounwind {
 ; CHECK-LABEL: test_v1f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    fmov s1, wzr
-; CHECK-NEXT:    fmul s0, s0, s1
-; CHECK-NEXT:    fcvt h0, s0
 ; CHECK-NEXT:    ret
-  %b = call half @llvm.vector.reduce.fmul.f16.v1f16(half 0.0, <1 x half> %a)
+  %b = call half @llvm.vector.reduce.fmul.f16.v1f16(half 1.0, <1 x half> %a)
   ret half %b
 }
 
@@ -28,72 +24,53 @@ define float @test_v1f32(<1 x float> %a) nounwind {
 ; CHECK-LABEL: test_v1f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmov s1, wzr
-; CHECK-NEXT:    fmul s0, s1, v0.s[0]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $q0
 ; CHECK-NEXT:    ret
-  %b = call float @llvm.vector.reduce.fmul.f32.v1f32(float 0.0, <1 x float> %a)
+  %b = call float @llvm.vector.reduce.fmul.f32.v1f32(float 1.0, <1 x float> %a)
   ret float %b
 }
 
 define double @test_v1f64(<1 x double> %a) nounwind {
 ; CHECK-LABEL: test_v1f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov d1, xzr
-; CHECK-NEXT:    fmul d0, d0, d1
 ; CHECK-NEXT:    ret
-  %b = call double @llvm.vector.reduce.fmul.f64.v1f64(double 0.0, <1 x double> %a)
+  %b = call double @llvm.vector.reduce.fmul.f64.v1f64(double 1.0, <1 x double> %a)
   ret double %b
 }
 
 define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
 ; CHECK-LABEL: test_v1f128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    adrp x8, .LCPI3_0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT:    bl __multf3
-; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %b = call fp128 @llvm.vector.reduce.fmul.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+  %b = call fp128 @llvm.vector.reduce.fmul.f128.v1f128(fp128 0xL00000000000000003fff00000000000000, <1 x fp128> %a)
   ret fp128 %b
 }
 
 define float @test_v3f32(<3 x float> %a) nounwind {
 ; CHECK-LABEL: test_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s1, wzr
-; CHECK-NEXT:    fmul s1, s1, v0.s[0]
-; CHECK-NEXT:    fmul s1, s1, v0.s[1]
+; CHECK-NEXT:    fmul s1, s0, v0.s[1]
 ; CHECK-NEXT:    fmul s0, s1, v0.s[2]
 ; CHECK-NEXT:    ret
-  %b = call float @llvm.vector.reduce.fmul.f32.v3f32(float 0.0, <3 x float> %a)
+  %b = call float @llvm.vector.reduce.fmul.f32.v3f32(float 1.0, <3 x float> %a)
   ret float %b
 }
 
 define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
 ; CHECK-LABEL: test_v2f128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
-; CHECK-NEXT:    adrp x8, .LCPI5_0
-; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI5_0]
-; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
-; CHECK-NEXT:    bl __multf3
-; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    bl __multf3
-; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %b = call fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+  %b = call fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 0xL00000000000000003fff00000000000000, <2 x fp128> %a)
   ret fp128 %b
 }
 
 define float @test_v16f32(<16 x float> %a) nounwind {
 ; CHECK-LABEL: test_v16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s4, wzr
-; CHECK-NEXT:    fmul s4, s4, v0.s[0]
-; CHECK-NEXT:    fmul s4, s4, v0.s[1]
+; CHECK-NEXT:    fmul s4, s0, v0.s[1]
 ; CHECK-NEXT:    fmul s4, s4, v0.s[2]
 ; CHECK-NEXT:    fmul s0, s4, v0.s[3]
 ; CHECK-NEXT:    fmul s0, s0, v1.s[0]
@@ -109,6 +86,6 @@ define float @test_v16f32(<16 x float> %a) nounwind {
 ; CHECK-NEXT:    fmul s0, s0, v3.s[2]
 ; CHECK-NEXT:    fmul s0, s0, v3.s[3]
 ; CHECK-NEXT:    ret
-  %b = call float @llvm.vector.reduce.fmul.f32.v16f32(float 0.0, <16 x float> %a)
+  %b = call float @llvm.vector.reduce.fmul.f32.v16f32(float 1.0, <16 x float> %a)
   ret float %b
 }

diff --git a/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-soft-float.ll b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-soft-float.ll
index 4217eab6ba70..b3334c43ef58 100644
--- a/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-soft-float.ll
+++ b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-soft-float.ll
@@ -59,6 +59,24 @@ define float @test_v4f32(<4 x float> %a) nounwind {
   ret float %b
 }
 
+define float @test_v4f32_strict(<4 x float> %a) nounwind {
+; CHECK-LABEL: test_v4f32_strict:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r11, lr}
+; CHECK-NEXT:    push {r4, r5, r11, lr}
+; CHECK-NEXT:    mov r4, r3
+; CHECK-NEXT:    mov r5, r2
+; CHECK-NEXT:    bl __aeabi_fmul
+; CHECK-NEXT:    mov r1, r5
+; CHECK-NEXT:    bl __aeabi_fmul
+; CHECK-NEXT:    mov r1, r4
+; CHECK-NEXT:    bl __aeabi_fmul
+; CHECK-NEXT:    pop {r4, r5, r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %b = call float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
+  ret float %b
+}
+
 define double @test_v2f64(<2 x double> %a) nounwind {
 ; CHECK-LABEL: test_v2f64:
 ; CHECK:       @ %bb.0:
@@ -71,6 +89,18 @@ define double @test_v2f64(<2 x double> %a) nounwind {
   ret double %b
 }
 
+define double @test_v2f64_strict(<2 x double> %a) nounwind {
+; CHECK-LABEL: test_v2f64_strict:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    bl __aeabi_dmul
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %b = call double @llvm.vector.reduce.fmul.f64.v2f64(double 1.0, <2 x double> %a)
+  ret double %b
+}
+
 define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
 ; CHECK-LABEL: test_v2f128:
 ; CHECK:       @ %bb.0:
@@ -93,3 +123,26 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
   %b = call fast fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 0xL00000000000000003fff00000000000000, <2 x fp128> %a)
   ret fp128 %b
 }
+
+define fp128 @test_v2f128_strict(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128_strict:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r11, lr}
+; CHECK-NEXT:    push {r11, lr}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    ldr r12, [sp, #36]
+; CHECK-NEXT:    str r12, [sp, #12]
+; CHECK-NEXT:    ldr r12, [sp, #32]
+; CHECK-NEXT:    str r12, [sp, #8]
+; CHECK-NEXT:    ldr r12, [sp, #28]
+; CHECK-NEXT:    str r12, [sp, #4]
+; CHECK-NEXT:    ldr r12, [sp, #24]
+; CHECK-NEXT:    str r12, [sp]
+; CHECK-NEXT:    bl __multf3
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    pop {r11, lr}
+; CHECK-NEXT:    mov pc, lr
+  %b = call fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 0xL00000000000000003fff00000000000000, <2 x fp128> %a)
+  ret fp128 %b
+}

diff --git a/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll
index b3d23addad41..d2476ab3f528 100644
--- a/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll
+++ b/llvm/test/CodeGen/ARM/vecreduce-fmul-legalization-strict.ll
@@ -16,86 +16,49 @@ define half @test_v1f16(<1 x half> %a) nounwind {
 ; CHECK-NEXT:    .save {r11, lr}
 ; CHECK-NEXT:    push {r11, lr}
 ; CHECK-NEXT:    bl __aeabi_f2h
-; CHECK-NEXT:    bl __aeabi_h2f
-; CHECK-NEXT:    vldr s0, .LCPI0_0
-; CHECK-NEXT:    vmov s2, r0
-; CHECK-NEXT:    vmul.f32 s0, s2, s0
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    bl __aeabi_f2h
+; CHECK-NEXT:    mov r1, #255
+; CHECK-NEXT:    orr r1, r1, #65280
+; CHECK-NEXT:    and r0, r0, r1
 ; CHECK-NEXT:    pop {r11, lr}
 ; CHECK-NEXT:    mov pc, lr
-; CHECK-NEXT:    .p2align 2
-; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI0_0:
-; CHECK-NEXT:    .long 0x00000000 @ float 0
-  %b = call half @llvm.vector.reduce.fmul.f16.v1f16(half 0.0, <1 x half> %a)
+  %b = call half @llvm.vector.reduce.fmul.f16.v1f16(half 1.0, <1 x half> %a)
   ret half %b
 }
 
 define float @test_v1f32(<1 x float> %a) nounwind {
 ; CHECK-LABEL: test_v1f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vldr s0, .LCPI1_0
-; CHECK-NEXT:    vmov s2, r0
-; CHECK-NEXT:    vmul.f32 s0, s2, s0
-; CHECK-NEXT:    vmov r0, s0
 ; CHECK-NEXT:    mov pc, lr
-; CHECK-NEXT:    .p2align 2
-; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI1_0:
-; CHECK-NEXT:    .long 0x00000000 @ float 0
-  %b = call float @llvm.vector.reduce.fmul.f32.v1f32(float 0.0, <1 x float> %a)
+  %b = call float @llvm.vector.reduce.fmul.f32.v1f32(float 1.0, <1 x float> %a)
   ret float %b
 }
 
 define double @test_v1f64(<1 x double> %a) nounwind {
 ; CHECK-LABEL: test_v1f64:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov.i32 d16, #0x0
-; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vmul.f64 d16, d17, d16
-; CHECK-NEXT:    vmov r0, r1, d16
 ; CHECK-NEXT:    mov pc, lr
-  %b = call double @llvm.vector.reduce.fmul.f64.v1f64(double 0.0, <1 x double> %a)
+  %b = call double @llvm.vector.reduce.fmul.f64.v1f64(double 1.0, <1 x double> %a)
   ret double %b
 }
 
 define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
 ; CHECK-LABEL: test_v1f128:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    .save {r11, lr}
-; CHECK-NEXT:    push {r11, lr}
-; CHECK-NEXT:    .pad #16
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    mov r12, #0
-; CHECK-NEXT:    str r12, [sp]
-; CHECK-NEXT:    str r12, [sp, #4]
-; CHECK-NEXT:    str r12, [sp, #8]
-; CHECK-NEXT:    str r12, [sp, #12]
-; CHECK-NEXT:    bl __multf3
-; CHECK-NEXT:    add sp, sp, #16
-; CHECK-NEXT:    pop {r11, lr}
 ; CHECK-NEXT:    mov pc, lr
-  %b = call fp128 @llvm.vector.reduce.fmul.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+  %b = call fp128 @llvm.vector.reduce.fmul.f128.v1f128(fp128 0xL00000000000000003fff00000000000000, <1 x fp128> %a)
   ret fp128 %b
 }
 
 define float @test_v3f32(<3 x float> %a) nounwind {
 ; CHECK-LABEL: test_v3f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d3, r2, r3
-; CHECK-NEXT:    vldr s0, .LCPI4_0
-; CHECK-NEXT:    vmov d2, r0, r1
-; CHECK-NEXT:    vmul.f32 s0, s4, s0
-; CHECK-NEXT:    vmul.f32 s0, s0, s5
-; CHECK-NEXT:    vmul.f32 s0, s0, s6
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
+; CHECK-NEXT:    vmul.f32 s4, s0, s1
+; CHECK-NEXT:    vmul.f32 s0, s4, s2
 ; CHECK-NEXT:    vmov r0, s0
 ; CHECK-NEXT:    mov pc, lr
-; CHECK-NEXT:    .p2align 2
-; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI4_0:
-; CHECK-NEXT:    .long 0x00000000 @ float 0
-  %b = call float @llvm.vector.reduce.fmul.f32.v3f32(float 0.0, <3 x float> %a)
+  %b = call float @llvm.vector.reduce.fmul.f32.v3f32(float 1.0, <3 x float> %a)
   ret float %b
 }
 
@@ -106,12 +69,6 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
 ; CHECK-NEXT:    push {r4, r5, r11, lr}
 ; CHECK-NEXT:    .pad #16
 ; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    mov r12, #0
-; CHECK-NEXT:    str r12, [sp]
-; CHECK-NEXT:    str r12, [sp, #4]
-; CHECK-NEXT:    str r12, [sp, #8]
-; CHECK-NEXT:    str r12, [sp, #12]
-; CHECK-NEXT:    bl __multf3
 ; CHECK-NEXT:    ldr r12, [sp, #36]
 ; CHECK-NEXT:    ldr lr, [sp, #32]
 ; CHECK-NEXT:    ldr r4, [sp, #40]
@@ -124,21 +81,19 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    pop {r4, r5, r11, lr}
 ; CHECK-NEXT:    mov pc, lr
-  %b = call fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+  %b = call fp128 @llvm.vector.reduce.fmul.f128.v2f128(fp128 0xL00000000000000003fff00000000000000, <2 x fp128> %a)
   ret fp128 %b
 }
 
 define float @test_v16f32(<16 x float> %a) nounwind {
 ; CHECK-LABEL: test_v16f32:
 ; CHECK:       @ %bb.0:
-; CHECK-NEXT:    vmov d3, r2, r3
-; CHECK-NEXT:    vldr s0, .LCPI6_0
-; CHECK-NEXT:    vmov d2, r0, r1
+; CHECK-NEXT:    vmov d1, r2, r3
+; CHECK-NEXT:    vmov d0, r0, r1
 ; CHECK-NEXT:    mov r0, sp
-; CHECK-NEXT:    vmul.f32 s0, s4, s0
-; CHECK-NEXT:    vmul.f32 s0, s0, s5
-; CHECK-NEXT:    vmul.f32 s0, s0, s6
-; CHECK-NEXT:    vmul.f32 s0, s0, s7
+; CHECK-NEXT:    vmul.f32 s4, s0, s1
+; CHECK-NEXT:    vmul.f32 s4, s4, s2
+; CHECK-NEXT:    vmul.f32 s0, s4, s3
 ; CHECK-NEXT:    vld1.64 {d2, d3}, [r0]
 ; CHECK-NEXT:    add r0, sp, #16
 ; CHECK-NEXT:    vmul.f32 s0, s0, s4
@@ -158,10 +113,6 @@ define float @test_v16f32(<16 x float> %a) nounwind {
 ; CHECK-NEXT:    vmul.f32 s0, s0, s7
 ; CHECK-NEXT:    vmov r0, s0
 ; CHECK-NEXT:    mov pc, lr
-; CHECK-NEXT:    .p2align 2
-; CHECK-NEXT:  @ %bb.1:
-; CHECK-NEXT:  .LCPI6_0:
-; CHECK-NEXT:    .long 0x00000000 @ float 0
-  %b = call float @llvm.vector.reduce.fmul.f32.v16f32(float 0.0, <16 x float> %a)
+  %b = call float @llvm.vector.reduce.fmul.f32.v16f32(float 1.0, <16 x float> %a)
   ret float %b
 }


        


More information about the llvm-commits mailing list