[llvm] 5a12024 - [RISCV] Optimize lowering of floating-point -0.0

Fraser Cormack via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jan 20 03:56:40 PST 2022


Author: Fraser Cormack
Date: 2022-01-20T11:46:28Z
New Revision: 5a12024b95e4bdac8719f18a0e58c692aa471534

URL: https://github.com/llvm/llvm-project/commit/5a12024b95e4bdac8719f18a0e58c692aa471534
DIFF: https://github.com/llvm/llvm-project/commit/5a12024b95e4bdac8719f18a0e58c692aa471534.diff

LOG: [RISCV] Optimize lowering of floating-point -0.0

This idea has come up in several reviews -- D115978 and D105902 -- so I
can't take any credit for it. Instead of using a constant-pool load to
lower -0.0, we can emit a sequence of two instructions:

    fmv.[hwd].x freg, zero
    fsgnjn.[hsd] freg, freg, freg

This is only done when the floating-point type is legal.
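
For illustration, this is what the lowering now looks like for an f32 -0.0
on an F-extension target (matching the updated fp-imm.ll checks below); the
assembler prints fsgnjn with identical source operands under its fneg
alias:

    fmv.w.x ft0, zero
    fneg.s  fa0, ft0

On RV32 with D there is no fmv.d.x, so the f64 pattern instead materializes
the zero via fcvt.d.w before negating it.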

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D117687

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/lib/Target/RISCV/RISCVInstrInfoF.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
    llvm/test/CodeGen/RISCV/fp-imm.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
    llvm/test/CodeGen/RISCV/zfh-imm.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index bbce828876770..304c05d9378f2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1305,8 +1305,6 @@ bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
     return false;
   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
     return false;
-  if (Imm.isNegZero())
-    return false;
   return Imm.isZero();
 }
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index e43ec89007455..2837b92da81f4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -285,6 +285,8 @@ let Predicates = [HasStdExtD, IsRV32] in {
 
 /// Float constants
 def : Pat<(f64 (fpimm0)), (FCVT_D_W (i32 X0))>;
+def : Pat<(f64 (fpimmneg0)), (FSGNJN_D (FCVT_D_W (i32 X0)),
+                                       (FCVT_D_W (i32 X0)))>;
 
 // double->[u]int. Round-to-zero must be used.
 def : Pat<(i32 (any_fp_to_sint FPR64:$rs1)), (FCVT_W_D FPR64:$rs1, 0b001)>;
@@ -309,6 +311,8 @@ let Predicates = [HasStdExtD, IsRV64] in {
 
 /// Float constants
 def : Pat<(f64 (fpimm0)), (FMV_D_X (i64 X0))>;
+def : Pat<(f64 (fpimmneg0)), (FSGNJN_D (FMV_D_X (i64 X0)),
+                                       (FMV_D_X (i64 X0)))>;
 
 // Moves (no conversion)
 def : Pat<(bitconvert (i64 GPR:$rs1)), (FMV_D_X GPR:$rs1)>;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index ce70844f187c8..a8ac06ba8da3f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -320,7 +320,8 @@ def PseudoQuietFLT_S : PseudoQuietFCMP<FPR32>;
 //===----------------------------------------------------------------------===//
 
 /// Floating point constants
-def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+def fpimm0    : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+def fpimmneg0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(-0.0); }]>;
 
 /// Generic pattern classes
 class PatSetCC<RegisterClass Ty, SDPatternOperator OpNode, CondCode Cond, RVInst Inst>
@@ -336,6 +337,7 @@ let Predicates = [HasStdExtF] in {
 
 /// Float constants
 def : Pat<(f32 (fpimm0)), (FMV_W_X X0)>;
+def : Pat<(f32 (fpimmneg0)), (FSGNJN_S (FMV_W_X X0), (FMV_W_X X0))>;
 
 /// Float conversion operations
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index e1c29dffa62dd..dfd0c74ee26c1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -201,6 +201,7 @@ let Predicates = [HasStdExtZfh] in {
 
 /// Float constants
 def : Pat<(f16 (fpimm0)), (FMV_H_X X0)>;
+def : Pat<(f16 (fpimmneg0)), (FSGNJN_H (FMV_H_X X0), (FMV_H_X X0))>;
 
 /// Float conversion operations
 

diff --git a/llvm/test/CodeGen/RISCV/fp-imm.ll b/llvm/test/CodeGen/RISCV/fp-imm.ll
index 55c413989bfb0..e2d7a92f000d5 100644
--- a/llvm/test/CodeGen/RISCV/fp-imm.ll
+++ b/llvm/test/CodeGen/RISCV/fp-imm.ll
@@ -34,26 +34,26 @@ define float @f32_positive_zero(float *%pf) nounwind {
 define float @f32_negative_zero(float *%pf) nounwind {
 ; RV32F-LABEL: f32_negative_zero:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV32F-NEXT:    fmv.w.x ft0, zero
+; RV32F-NEXT:    fneg.s fa0, ft0
 ; RV32F-NEXT:    ret
 ;
 ; RV32D-LABEL: f32_negative_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32D-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV32D-NEXT:    fmv.w.x ft0, zero
+; RV32D-NEXT:    fneg.s fa0, ft0
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f32_negative_zero:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV64F-NEXT:    fmv.w.x ft0, zero
+; RV64F-NEXT:    fneg.s fa0, ft0
 ; RV64F-NEXT:    ret
 ;
 ; RV64D-LABEL: f32_negative_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64D-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV64D-NEXT:    fmv.w.x ft0, zero
+; RV64D-NEXT:    fneg.s fa0, ft0
 ; RV64D-NEXT:    ret
   ret float -0.0
 }
@@ -91,8 +91,8 @@ define double @f64_negative_zero(double *%pd) nounwind {
 ;
 ; RV32D-LABEL: f64_negative_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32D-NEXT:    fld fa0, %lo(.LCPI3_0)(a0)
+; RV32D-NEXT:    fcvt.d.w ft0, zero
+; RV32D-NEXT:    fneg.d fa0, ft0
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f64_negative_zero:
@@ -103,8 +103,8 @@ define double @f64_negative_zero(double *%pd) nounwind {
 ;
 ; RV64D-LABEL: f64_negative_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV64D-NEXT:    fld fa0, %lo(.LCPI3_0)(a0)
+; RV64D-NEXT:    fmv.d.x ft0, zero
+; RV64D-NEXT:    fneg.d fa0, ft0
 ; RV64D-NEXT:    ret
   ret double -0.0
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index bb77c3b593476..e37227ef22261 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128  -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128  -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>)
 
@@ -38,11 +38,9 @@ define half @vreduce_fadd_v2f16(<2 x half>* %x, half %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI2_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -73,11 +71,9 @@ define half @vreduce_fadd_v4f16(<4 x half>* %x, half %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -108,11 +104,9 @@ define half @vreduce_fadd_v8f16(<8 x half>* %x, half %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -143,11 +137,9 @@ define half @vreduce_fadd_v16f16(<16 x half>* %x, half %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v10, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v10, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -179,10 +171,10 @@ define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v12, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v12, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -218,10 +210,10 @@ define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI12_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -260,10 +252,10 @@ define half @vreduce_fadd_v128f16(<128 x half>* %x, half %s) {
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI14_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -334,11 +326,9 @@ define float @vreduce_fadd_v2f32(<2 x float>* %x, float %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI18_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -369,11 +359,9 @@ define float @vreduce_fadd_v4f32(<4 x float>* %x, float %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI20_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -404,11 +392,9 @@ define float @vreduce_fadd_v8f32(<8 x float>* %x, float %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI22_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v10, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v10, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -439,11 +425,9 @@ define float @vreduce_fadd_v16f32(<16 x float>* %x, float %s) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI24_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v12, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v12, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -475,10 +459,10 @@ define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI26_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI26_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -517,10 +501,10 @@ define float @vreduce_fadd_v64f32(<64 x float>* %x, float %s) {
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle32.v v16, (a0)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    lui a0, %hi(.LCPI28_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI28_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -587,19 +571,29 @@ define double @vreduce_ord_fadd_v1f64(<1 x double>* %x, double %s) {
 declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)
 
 define double @vreduce_fadd_v2f64(<2 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v2f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI32_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI32_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v9
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v9
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v9
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <2 x double>, <2 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v)
   ret double %red
@@ -622,19 +616,29 @@ define double @vreduce_ord_fadd_v2f64(<2 x double>* %x, double %s) {
 declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>)
 
 define double @vreduce_fadd_v4f64(<4 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v4f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI34_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI34_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v10
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v10
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v10
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <4 x double>, <4 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v)
   ret double %red
@@ -657,19 +661,29 @@ define double @vreduce_ord_fadd_v4f64(<4 x double>* %x, double %s) {
 declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)
 
 define double @vreduce_fadd_v8f64(<8 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v8f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI36_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v12
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v12, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v12
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v12, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v12
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <8 x double>, <8 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v)
   ret double %red
@@ -692,19 +706,29 @@ define double @vreduce_ord_fadd_v8f64(<8 x double>* %x, double %s) {
 declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>)
 
 define double @vreduce_fadd_v16f64(<16 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v16f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI38_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v16
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v16f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v16, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v16
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v16f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v16, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v16
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <16 x double>, <16 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v)
   ret double %red
@@ -727,22 +751,35 @@ define double @vreduce_ord_fadd_v16f64(<16 x double>* %x, double %s) {
 declare double @llvm.vector.reduce.fadd.v32f64(double, <32 x double>)
 
 define double @vreduce_fadd_v32f64(<32 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v32f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    addi a0, a0, 128
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI40_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v16
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v32f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 128
+; RV32-NEXT:    vle64.v v16, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v24, ft0
+; RV32-NEXT:    vfadd.vv v8, v8, v16
+; RV32-NEXT:    vfredusum.vs v8, v8, v24
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v32f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    vle64.v v16, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v24, ft0
+; RV64-NEXT:    vfadd.vv v8, v8, v16
+; RV64-NEXT:    vfredusum.vs v8, v8, v24
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <32 x double>, <32 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v)
   ret double %red

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index c5f37613ef915..ee6eeb9275760 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare half @llvm.vector.reduce.fadd.nxv1f16(half, <vscale x 1 x half>)
 
 define half @vreduce_fadd_nxv1f16(<vscale x 1 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI0_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -40,10 +40,10 @@ declare half @llvm.vector.reduce.fadd.nxv2f16(half, <vscale x 2 x half>)
 define half @vreduce_fadd_nxv2f16(<vscale x 2 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI2_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -71,10 +71,10 @@ declare half @llvm.vector.reduce.fadd.nxv4f16(half, <vscale x 4 x half>)
 define half @vreduce_fadd_nxv4f16(<vscale x 4 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -102,10 +102,10 @@ declare float @llvm.vector.reduce.fadd.nxv1f32(float, <vscale x 1 x float>)
 define float @vreduce_fadd_nxv1f32(<vscale x 1 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -133,10 +133,10 @@ declare float @llvm.vector.reduce.fadd.nxv2f32(float, <vscale x 2 x float>)
 define float @vreduce_fadd_nxv2f32(<vscale x 2 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -164,10 +164,10 @@ declare float @llvm.vector.reduce.fadd.nxv4f32(float, <vscale x 4 x float>)
 define float @vreduce_fadd_nxv4f32(<vscale x 4 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v10, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v10, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -193,17 +193,29 @@ define float @vreduce_ord_fadd_nxv4f32(<vscale x 4 x float> %v, float %s) {
 declare double @llvm.vector.reduce.fadd.nxv1f64(double, <vscale x 1 x double>)
 
 define double @vreduce_fadd_nxv1f64(<vscale x 1 x double> %v, double %s) {
-; CHECK-LABEL: vreduce_fadd_nxv1f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI12_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v9
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_nxv1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vfredusum.vs v8, v8, v9
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_nxv1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vfredusum.vs v8, v8, v9
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %red = call reassoc double @llvm.vector.reduce.fadd.nxv1f64(double %s, <vscale x 1 x double> %v)
   ret double %red
 }
@@ -224,17 +236,29 @@ define double @vreduce_ord_fadd_nxv1f64(<vscale x 1 x double> %v, double %s) {
 declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)
 
 define double @vreduce_fadd_nxv2f64(<vscale x 2 x double> %v, double %s) {
-; CHECK-LABEL: vreduce_fadd_nxv2f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI14_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v10
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_nxv2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vfredusum.vs v8, v8, v10
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_nxv2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vfredusum.vs v8, v8, v10
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %red = call reassoc double @llvm.vector.reduce.fadd.nxv2f64(double %s, <vscale x 2 x double> %v)
   ret double %red
 }
@@ -255,17 +279,29 @@ define double @vreduce_ord_fadd_nxv2f64(<vscale x 2 x double> %v, double %s) {
 declare double @llvm.vector.reduce.fadd.nxv4f64(double, <vscale x 4 x double>)
 
 define double @vreduce_fadd_nxv4f64(<vscale x 4 x double> %v, double %s) {
-; CHECK-LABEL: vreduce_fadd_nxv4f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI16_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI16_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v12
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vfmv.s.f v12, ft0
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vfredusum.vs v8, v8, v12
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vfmv.s.f v12, ft0
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vfredusum.vs v8, v8, v12
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %red = call reassoc double @llvm.vector.reduce.fadd.nxv4f64(double %s, <vscale x 4 x double> %v)
   ret double %red
 }

diff --git a/llvm/test/CodeGen/RISCV/zfh-imm.ll b/llvm/test/CodeGen/RISCV/zfh-imm.ll
index daaa04ea97917..2247acf4344ba 100644
--- a/llvm/test/CodeGen/RISCV/zfh-imm.ll
+++ b/llvm/test/CodeGen/RISCV/zfh-imm.ll
@@ -34,26 +34,26 @@ define half @f16_positive_zero(half *%pf) nounwind {
 define half @f16_negative_zero(half *%pf) nounwind {
 ; RV32IZFH-LABEL: f16_negative_zero:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV32IZFH-NEXT:    fmv.h.x ft0, zero
+; RV32IZFH-NEXT:    fneg.h fa0, ft0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: f16_negative_zero:
 ; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IDZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV32IDZFH-NEXT:    fmv.h.x ft0, zero
+; RV32IDZFH-NEXT:    fneg.h fa0, ft0
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: f16_negative_zero:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64IZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV64IZFH-NEXT:    fmv.h.x ft0, zero
+; RV64IZFH-NEXT:    fneg.h fa0, ft0
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: f16_negative_zero:
 ; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64IDZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV64IDZFH-NEXT:    fmv.h.x ft0, zero
+; RV64IDZFH-NEXT:    fneg.h fa0, ft0
 ; RV64IDZFH-NEXT:    ret
   ret half -0.0
 }


        

