[llvm] 3c24aee - [RISCV] Select +0.0 immediate using fmv.{w,d}.x / fcvt.d.w

Roger Ferrer Ibanez via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 20 02:45:18 PDT 2020


Author: Roger Ferrer Ibanez
Date: 2020-03-20T09:42:24Z
New Revision: 3c24aee7ee8b00ff3825684f3e5436d8f71e4046

URL: https://github.com/llvm/llvm-project/commit/3c24aee7ee8b00ff3825684f3e5436d8f71e4046
DIFF: https://github.com/llvm/llvm-project/commit/3c24aee7ee8b00ff3825684f3e5436d8f71e4046.diff

LOG: [RISCV] Select +0.0 immediate using fmv.{w,d}.x / fcvt.d.w

Floating point positive zero can be selected using fmv.w.x / fmv.d.x /
fcvt.d.w and the zero source register.

Differential Revision: https://reviews.llvm.org/D75729

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/lib/Target/RISCV/RISCVInstrInfoF.td
    llvm/test/CodeGen/RISCV/double-arith.ll
    llvm/test/CodeGen/RISCV/float-arith.ll
    llvm/test/CodeGen/RISCV/float-br-fcmp.ll
    llvm/test/CodeGen/RISCV/fp-imm.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index be6d3f522242..4e3fde556068 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -336,6 +336,17 @@ bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
 }
 
+bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
+                                       bool ForCodeSize) const {
+  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
+    return false;
+  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
+    return false;
+  if (Imm.isNegZero())
+    return false;
+  return Imm.isZero();
+}
+
 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
   return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
          (VT == MVT::f64 && Subtarget.hasStdExtD());

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 58bb0f3264b9..b56d6dce2757 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -74,6 +74,8 @@ class RISCVTargetLowering : public TargetLowering {
   bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;
   bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
+  bool isFPImmLegal(const APFloat &Imm, EVT VT,
+                    bool ForCodeSize) const override;
 
   bool hasBitPreservingFPLogic(EVT VT) const override;
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index 8b3274ddc461..d32c5f37d630 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -339,6 +339,10 @@ def SplitF64Pseudo
 } // Predicates = [HasStdExtD]
 
 let Predicates = [HasStdExtD, IsRV32] in {
+
+/// Float constants
+def : Pat<(f64 (fpimm0)), (FCVT_D_W X0)>;
+
 // double->[u]int. Round-to-zero must be used.
 def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_W_D FPR64:$rs1, 0b001)>;
 def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>;
@@ -349,6 +353,10 @@ def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
 } // Predicates = [HasStdExtD, IsRV32]
 
 let Predicates = [HasStdExtD, IsRV64] in {
+
+/// Float constants
+def : Pat<(f64 (fpimm0)), (FMV_D_X X0)>;
+
 def : Pat<(bitconvert GPR:$rs1), (FMV_D_X GPR:$rs1)>;
 def : Pat<(bitconvert FPR64:$rs1), (FMV_X_D FPR64:$rs1)>;
 

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index 757fc7de56a9..190007fe6f1f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -286,6 +286,9 @@ def PseudoFSW  : PseudoStore<"fsw", FPR32>;
 // Pseudo-instructions and codegen patterns
 //===----------------------------------------------------------------------===//
 
+/// Floating point constants
+def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+
 /// Generic pattern classes
 class PatFpr32Fpr32<SDPatternOperator OpNode, RVInstR Inst>
     : Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2)>;
@@ -295,6 +298,9 @@ class PatFpr32Fpr32DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
 
 let Predicates = [HasStdExtF] in {
 
+/// Float constants
+def : Pat<(f32 (fpimm0)), (FMV_W_X X0)>;
+
 /// Float conversion operations
 
 // Moves (no conversion)

diff  --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index ad68dca154dc..604911ae49f2 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -460,9 +460,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT:    sw a4, 8(sp)
 ; RV32IFD-NEXT:    sw a5, 12(sp)
 ; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI15_0)
-; RV32IFD-NEXT:    fld ft3, 0(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft3, zero
 ; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
 ; RV32IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
 ; RV32IFD-NEXT:    fsd ft0, 8(sp)
@@ -473,14 +471,12 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    lui a3, %hi(.LCPI15_0)
-; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI15_0)
-; RV64IFD-NEXT:    fld ft0, 0(a3)
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, a2
-; RV64IFD-NEXT:    fadd.d ft0, ft3, ft0
-; RV64IFD-NEXT:    fmsub.d ft0, ft2, ft1, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmv.d.x ft2, a2
+; RV64IFD-NEXT:    fmv.d.x ft3, zero
+; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %c_ = fadd double 0.0, %c ; avoid negation using xor
@@ -502,9 +498,7 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    lui a0, %hi(.LCPI16_0)
-; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI16_0)
-; RV32IFD-NEXT:    fld ft3, 0(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft3, zero
 ; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
 ; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
 ; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
@@ -516,15 +510,13 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fnmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    lui a3, %hi(.LCPI16_0)
-; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI16_0)
-; RV64IFD-NEXT:    fld ft0, 0(a3)
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a2
-; RV64IFD-NEXT:    fmv.d.x ft3, a0
-; RV64IFD-NEXT:    fadd.d ft3, ft3, ft0
-; RV64IFD-NEXT:    fadd.d ft0, ft2, ft0
-; RV64IFD-NEXT:    fnmadd.d ft0, ft3, ft1, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a2
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmv.d.x ft3, zero
+; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
+; RV64IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a
@@ -548,9 +540,7 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI17_0)
-; RV32IFD-NEXT:    fld ft3, 0(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft3, zero
 ; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
 ; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
 ; RV32IFD-NEXT:    fsd ft0, 8(sp)
@@ -561,14 +551,12 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fnmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    lui a3, %hi(.LCPI17_0)
-; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI17_0)
-; RV64IFD-NEXT:    fld ft0, 0(a3)
-; RV64IFD-NEXT:    fmv.d.x ft1, a2
-; RV64IFD-NEXT:    fmv.d.x ft2, a1
-; RV64IFD-NEXT:    fmv.d.x ft3, a0
-; RV64IFD-NEXT:    fadd.d ft0, ft3, ft0
-; RV64IFD-NEXT:    fnmsub.d ft0, ft0, ft2, ft1
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmv.d.x ft3, zero
+; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a

diff  --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 5244a69a6fad..f22f85d5d790 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -339,27 +339,23 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
 define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a3, %hi(.LCPI15_0)
-; RV32IF-NEXT:    addi a3, a3, %lo(.LCPI15_0)
-; RV32IF-NEXT:    flw ft0, 0(a3)
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, a2
-; RV32IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV32IF-NEXT:    fmsub.s ft0, ft2, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, a1
+; RV32IF-NEXT:    fmv.w.x ft1, a0
+; RV32IF-NEXT:    fmv.w.x ft2, a2
+; RV32IF-NEXT:    fmv.w.x ft3, zero
+; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV32IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a3, %hi(.LCPI15_0)
-; RV64IF-NEXT:    addi a3, a3, %lo(.LCPI15_0)
-; RV64IF-NEXT:    flw ft0, 0(a3)
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, a2
-; RV64IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV64IF-NEXT:    fmsub.s ft0, ft2, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, a1
+; RV64IF-NEXT:    fmv.w.x ft1, a0
+; RV64IF-NEXT:    fmv.w.x ft2, a2
+; RV64IF-NEXT:    fmv.w.x ft3, zero
+; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV64IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %c_ = fadd float 0.0, %c ; avoid negation using xor
@@ -371,29 +367,25 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a3, %hi(.LCPI16_0)
-; RV32IF-NEXT:    addi a3, a3, %lo(.LCPI16_0)
-; RV32IF-NEXT:    flw ft0, 0(a3)
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a2
-; RV32IF-NEXT:    fmv.w.x ft3, a0
-; RV32IF-NEXT:    fadd.s ft3, ft3, ft0
-; RV32IF-NEXT:    fadd.s ft0, ft2, ft0
-; RV32IF-NEXT:    fnmadd.s ft0, ft3, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, a1
+; RV32IF-NEXT:    fmv.w.x ft1, a2
+; RV32IF-NEXT:    fmv.w.x ft2, a0
+; RV32IF-NEXT:    fmv.w.x ft3, zero
+; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
+; RV32IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a3, %hi(.LCPI16_0)
-; RV64IF-NEXT:    addi a3, a3, %lo(.LCPI16_0)
-; RV64IF-NEXT:    flw ft0, 0(a3)
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a2
-; RV64IF-NEXT:    fmv.w.x ft3, a0
-; RV64IF-NEXT:    fadd.s ft3, ft3, ft0
-; RV64IF-NEXT:    fadd.s ft0, ft2, ft0
-; RV64IF-NEXT:    fnmadd.s ft0, ft3, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, a1
+; RV64IF-NEXT:    fmv.w.x ft1, a2
+; RV64IF-NEXT:    fmv.w.x ft2, a0
+; RV64IF-NEXT:    fmv.w.x ft3, zero
+; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
+; RV64IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %a_ = fadd float 0.0, %a
@@ -407,27 +399,23 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a3, %hi(.LCPI17_0)
-; RV32IF-NEXT:    addi a3, a3, %lo(.LCPI17_0)
-; RV32IF-NEXT:    flw ft0, 0(a3)
-; RV32IF-NEXT:    fmv.w.x ft1, a2
-; RV32IF-NEXT:    fmv.w.x ft2, a1
-; RV32IF-NEXT:    fmv.w.x ft3, a0
-; RV32IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV32IF-NEXT:    fnmsub.s ft0, ft0, ft2, ft1
+; RV32IF-NEXT:    fmv.w.x ft0, a2
+; RV32IF-NEXT:    fmv.w.x ft1, a1
+; RV32IF-NEXT:    fmv.w.x ft2, a0
+; RV32IF-NEXT:    fmv.w.x ft3, zero
+; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV32IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a3, %hi(.LCPI17_0)
-; RV64IF-NEXT:    addi a3, a3, %lo(.LCPI17_0)
-; RV64IF-NEXT:    flw ft0, 0(a3)
-; RV64IF-NEXT:    fmv.w.x ft1, a2
-; RV64IF-NEXT:    fmv.w.x ft2, a1
-; RV64IF-NEXT:    fmv.w.x ft3, a0
-; RV64IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV64IF-NEXT:    fnmsub.s ft0, ft0, ft2, ft1
+; RV64IF-NEXT:    fmv.w.x ft0, a2
+; RV64IF-NEXT:    fmv.w.x ft1, a1
+; RV64IF-NEXT:    fmv.w.x ft2, a0
+; RV64IF-NEXT:    fmv.w.x ft3, zero
+; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV64IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %a_ = fadd float 0.0, %a

diff  --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index a1e5b32eeaff..dede086fed85 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -720,10 +720,8 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    sw ra, 12(sp)
 ; RV32IF-NEXT:    mv a0, zero
 ; RV32IF-NEXT:    call dummy
-; RV32IF-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32IF-NEXT:    addi a1, a1, %lo(.LCPI17_0)
-; RV32IF-NEXT:    flw ft1, 0(a1)
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    fmv.w.x ft1, zero
 ; RV32IF-NEXT:    fsw ft1, 8(sp)
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
@@ -747,9 +745,7 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    addi sp, sp, -32
 ; RV64IF-NEXT:    sd ra, 24(sp)
 ; RV64IF-NEXT:    sd s0, 16(sp)
-; RV64IF-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV64IF-NEXT:    addi a0, a0, %lo(.LCPI17_0)
-; RV64IF-NEXT:    flw ft0, 0(a0)
+; RV64IF-NEXT:    fmv.w.x ft0, zero
 ; RV64IF-NEXT:    fsw ft0, 12(sp)
 ; RV64IF-NEXT:    fmv.x.w s0, ft0
 ; RV64IF-NEXT:    mv a0, s0

diff  --git a/llvm/test/CodeGen/RISCV/fp-imm.ll b/llvm/test/CodeGen/RISCV/fp-imm.ll
index bcc00c80fa8c..f70eaab625f6 100644
--- a/llvm/test/CodeGen/RISCV/fp-imm.ll
+++ b/llvm/test/CodeGen/RISCV/fp-imm.ll
@@ -11,30 +11,22 @@
 define float @f32_positive_zero(float *%pf) nounwind {
 ; RV32F-LABEL: f32_positive_zero:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV32F-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV32F-NEXT:    flw fa0, 0(a0)
+; RV32F-NEXT:    fmv.w.x fa0, zero
 ; RV32F-NEXT:    ret
 ;
 ; RV32D-LABEL: f32_positive_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV32D-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV32D-NEXT:    flw fa0, 0(a0)
+; RV32D-NEXT:    fmv.w.x fa0, zero
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f32_positive_zero:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV64F-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV64F-NEXT:    flw fa0, 0(a0)
+; RV64F-NEXT:    fmv.w.x fa0, zero
 ; RV64F-NEXT:    ret
 ;
 ; RV64D-LABEL: f32_positive_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV64D-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV64D-NEXT:    flw fa0, 0(a0)
+; RV64D-NEXT:    fmv.w.x fa0, zero
 ; RV64D-NEXT:    ret
   ret float 0.0
 }
@@ -79,9 +71,7 @@ define double @f64_positive_zero(double *%pd) nounwind {
 ;
 ; RV32D-LABEL: f64_positive_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI2_0)
-; RV32D-NEXT:    addi a0, a0, %lo(.LCPI2_0)
-; RV32D-NEXT:    fld fa0, 0(a0)
+; RV32D-NEXT:    fcvt.d.w fa0, zero
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f64_positive_zero:
@@ -91,9 +81,7 @@ define double @f64_positive_zero(double *%pd) nounwind {
 ;
 ; RV64D-LABEL: f64_positive_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI2_0)
-; RV64D-NEXT:    addi a0, a0, %lo(.LCPI2_0)
-; RV64D-NEXT:    fld fa0, 0(a0)
+; RV64D-NEXT:    fmv.d.x fa0, zero
 ; RV64D-NEXT:    ret
   ret double 0.0
 }


        


More information about the llvm-commits mailing list