[llvm] dbbc95e - [RISCV] Use softPromoteHalf legalization for fp16 without Zfh rather than PromoteFloat.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Thu Apr 1 12:42:09 PDT 2021


Author: Craig Topper
Date: 2021-04-01T12:41:57-07:00
New Revision: dbbc95e3e5aa09928ed4531f7ca01dd979cabab7

URL: https://github.com/llvm/llvm-project/commit/dbbc95e3e5aa09928ed4531f7ca01dd979cabab7
DIFF: https://github.com/llvm/llvm-project/commit/dbbc95e3e5aa09928ed4531f7ca01dd979cabab7.diff

LOG: [RISCV] Use softPromoteHalf legalization for fp16 without Zfh rather than PromoteFloat.

The default legalization strategy is PromoteFloat, which keeps
half values in single-precision format across chains of floating
point operations. Conversion to/from float is done only at loads,
stores, bitcasts, and other places that care about the value being
exactly 16 bits wide.

This patch switches to the alternative method, softPromoteHalf,
which aims to keep the type in 16-bit format between operations:
for each arithmetic operation we promote to float and immediately
round the result back to half. This is closer to the IR semantics,
since we round after every operation instead of accumulating extra
precision across multiple operations. X86 is the only other target
that enables this today; see https://reviews.llvm.org/D73749.
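
As a purely illustrative sketch (the IR below is not taken from this
patch or its tests; the function and value names are invented),
consider a chain of two half operations:

  define half @mul_add(half %a, half %b, half %c) {
    %t = fmul half %a, %b
    %r = fadd half %t, %c
    ret half %r
  }

With softPromoteHalf, legalization (which happens in SelectionDAG,
not as an IR rewrite) behaves roughly as if the body were:

  %a.f = fpext half %a to float
  %b.f = fpext half %b to float
  %t.f = fmul float %a.f, %b.f
  %t = fptrunc float %t.f to half    ; round after the multiply
  %t.e = fpext half %t to float
  %c.f = fpext half %c to float
  %r.f = fadd float %t.e, %c.f
  %r = fptrunc float %r.f to half    ; round after the add

so every intermediate result is rounded back to half. PromoteFloat
would instead keep %t in float between the fmul and the fadd and
round only once at the end.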

I had to update getRegisterTypeForCallingConv to force f16 to use
f32 as the register type when the F extension is enabled, so that
we can still pass it in the lower bits of an FPR for the ilp32f and
lp64f ABIs. softPromoteHalf would otherwise always give i16 as the
argument type.
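
As an illustration of the calling-convention impact (a hypothetical
example based on the description above, not taken from this patch's
tests), with the F extension and the ilp32f ABI a function like

  define half @pick_first(half %a, half %b) {
    ret half %a
  }

should still receive %a and %b in fa0 and fa1, with each half value
in the lower bits of its FPR, because the register type reported for
f16 is f32. Without the override, softPromoteHalf would report i16
and the arguments would end up in GPRs instead.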

Reviewed By: asb, frasercrmck

Differential Revision: https://reviews.llvm.org/D99148

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/test/CodeGen/RISCV/calling-conv-half.ll
    llvm/test/CodeGen/RISCV/copysign-casts.ll
    llvm/test/CodeGen/RISCV/fp16-promote.ll
    llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
    llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 08df519626b1..c951e487e2ba 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -858,6 +858,28 @@ bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
          (VT == MVT::f64 && Subtarget.hasStdExtD());
 }
 
+MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
+                                                      CallingConv::ID CC,
+                                                      EVT VT) const {
+  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
+  // end up using a GPR but that will be decided based on ABI.
+  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
+    return MVT::f32;
+
+  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
+}
+
+unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
+                                                           CallingConv::ID CC,
+                                                           EVT VT) const {
+  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
+  // end up using a GPR but that will be decided based on ABI.
+  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
+    return 1;
+
+  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
+}
+
 // Changes the condition code and swaps operands if necessary, so the SetCC
 // operation matches one of the comparisons supported directly by branches
 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index b17aa1527b79..a684d1caca4a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -261,6 +261,19 @@ class RISCVTargetLowering : public TargetLowering {
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
 
+  bool softPromoteHalfType() const override { return true; }
+
+  /// Return the register type for a given MVT, ensuring vectors are treated
+  /// as a series of gpr sized integers.
+  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
+                                    EVT VT) const override;
+
+  /// Return the number of registers for a given MVT, ensuring vectors are
+  /// treated as a series of gpr sized integers.
+  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
+                                         CallingConv::ID CC,
+                                         EVT VT) const override;
+
   /// Return true if the given shuffle mask can be codegen'd directly, or if it
   /// should be stack expanded.
   bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index 79534313154c..46251f9660d4 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -191,8 +191,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s0, a7
 ; RV32I-NEXT:    lhu a0, 16(sp)
+; RV32I-NEXT:    mv s0, a7
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    add a0, s0, a0
@@ -206,8 +206,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a7
 ; RV64I-NEXT:    lhu a0, 16(sp)
+; RV64I-NEXT:    mv s0, a7
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    addw a0, s0, a0
@@ -221,8 +221,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    mv s0, a7
 ; RV32IF-NEXT:    lhu a0, 16(sp)
+; RV32IF-NEXT:    mv s0, a7
 ; RV32IF-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
@@ -237,8 +237,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV64IF-NEXT:    addi sp, sp, -16
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64IF-NEXT:    mv s0, a7
 ; RV64IF-NEXT:    lhu a0, 16(sp)
+; RV64IF-NEXT:    mv s0, a7
 ; RV64IF-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz

diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index bfeddeab8731..ab4580da2551 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -340,98 +340,73 @@ define float @fold_demote_s_d(float %a, double %b) nounwind {
 define half @fold_demote_h_s(half %a, float %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_s:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    and a2, s0, a1
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    call __gnu_f2h_ieee@plt
-; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    lui a2, 8
+; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    lui a2, 524288
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_h_s:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    and a2, s0, a1
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    or a0, a0, a2
-; RV64I-NEXT:    call __gnu_f2h_ieee@plt
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    lui a2, 8
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    slli a2, a2, 31
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 16
+; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_demote_h_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    fsw fs0, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    fmv.s fs0, fa1
-; RV32IF-NEXT:    fmv.x.w a0, fa0
-; RV32IF-NEXT:    call __gnu_h2f_ieee@plt
-; RV32IF-NEXT:    fsgnj.s fa0, fa0, fs0
-; RV32IF-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IF-NEXT:    fmv.x.w a0, fa1
+; RV32IF-NEXT:    fmv.x.w a1, fa0
+; RV32IF-NEXT:    lui a2, 8
+; RV32IF-NEXT:    addi a2, a2, -1
+; RV32IF-NEXT:    and a1, a1, a2
+; RV32IF-NEXT:    lui a2, 524288
+; RV32IF-NEXT:    and a0, a0, a2
+; RV32IF-NEXT:    srli a0, a0, 16
+; RV32IF-NEXT:    or a0, a1, a0
 ; RV32IF-NEXT:    lui a1, 1048560
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    fmv.w.x fa0, a0
-; RV32IF-NEXT:    flw fs0, 8(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: fold_demote_h_s:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    fmv.s fs0, fa1
-; RV32IFD-NEXT:    fmv.x.w a0, fa0
-; RV32IFD-NEXT:    call __gnu_h2f_ieee@plt
-; RV32IFD-NEXT:    fsgnj.s fa0, fa0, fs0
-; RV32IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IFD-NEXT:    fmv.x.w a0, fa1
+; RV32IFD-NEXT:    fmv.x.w a1, fa0
+; RV32IFD-NEXT:    lui a2, 8
+; RV32IFD-NEXT:    addi a2, a2, -1
+; RV32IFD-NEXT:    and a1, a1, a2
+; RV32IFD-NEXT:    lui a2, 524288
+; RV32IFD-NEXT:    and a0, a0, a2
+; RV32IFD-NEXT:    srli a0, a0, 16
+; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
-; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fold_demote_h_s:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    fmv.s fs0, fa1
-; RV64IFD-NEXT:    fmv.x.w a0, fa0
-; RV64IFD-NEXT:    call __gnu_h2f_ieee@plt
-; RV64IFD-NEXT:    fsgnj.s fa0, fa0, fs0
-; RV64IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV64IFD-NEXT:    fmv.x.w a0, fa1
+; RV64IFD-NEXT:    fmv.x.w a1, fa0
+; RV64IFD-NEXT:    lui a2, 8
+; RV64IFD-NEXT:    addiw a2, a2, -1
+; RV64IFD-NEXT:    and a1, a1, a2
+; RV64IFD-NEXT:    lui a2, 524288
+; RV64IFD-NEXT:    and a0, a0, a2
+; RV64IFD-NEXT:    srli a0, a0, 16
+; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
-; RV64IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32IFZFH-LABEL: fold_demote_h_s:
@@ -459,104 +434,76 @@ define half @fold_demote_h_s(half %a, float %b) nounwind {
 define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_d:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s0, a2
-; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    lui a1, 8
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    and a2, s0, a1
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    or a0, a0, a2
-; RV32I-NEXT:    call __gnu_f2h_ieee@plt
-; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_h_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, -1
-; RV64I-NEXT:    slli a1, a1, 63
-; RV64I-NEXT:    and a1, s0, a1
-; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    lui a2, 8
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a1, a1, 48
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    call __gnu_f2h_ieee@plt
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_demote_h_d:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi sp, sp, -16
-; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    mv s0, a1
 ; RV32IF-NEXT:    fmv.x.w a0, fa0
-; RV32IF-NEXT:    call __gnu_h2f_ieee@plt
-; RV32IF-NEXT:    fmv.w.x ft0, s0
-; RV32IF-NEXT:    fsgnj.s fa0, fa0, ft0
-; RV32IF-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IF-NEXT:    lui a2, 8
+; RV32IF-NEXT:    addi a2, a2, -1
+; RV32IF-NEXT:    and a0, a0, a2
+; RV32IF-NEXT:    lui a2, 524288
+; RV32IF-NEXT:    and a1, a1, a2
+; RV32IF-NEXT:    srli a1, a1, 16
+; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    lui a1, 1048560
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    fmv.w.x fa0, a0
-; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
 ;
 ; RV32IFD-LABEL: fold_demote_h_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
-; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT:    fmv.d fs0, fa1
+; RV32IFD-NEXT:    fsd fa1, 8(sp)
 ; RV32IFD-NEXT:    fmv.x.w a0, fa0
-; RV32IFD-NEXT:    call __gnu_h2f_ieee@plt
-; RV32IFD-NEXT:    fcvt.s.d ft0, fs0
-; RV32IFD-NEXT:    fsgnj.s fa0, fa0, ft0
-; RV32IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    lui a2, 8
+; RV32IFD-NEXT:    addi a2, a2, -1
+; RV32IFD-NEXT:    and a0, a0, a2
+; RV32IFD-NEXT:    lui a2, 524288
+; RV32IFD-NEXT:    and a1, a1, a2
+; RV32IFD-NEXT:    srli a1, a1, 16
+; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
-; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
-; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fold_demote_h_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi sp, sp, -16
-; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT:    fmv.d fs0, fa1
-; RV64IFD-NEXT:    fmv.x.w a0, fa0
-; RV64IFD-NEXT:    call __gnu_h2f_ieee@plt
-; RV64IFD-NEXT:    fcvt.s.d ft0, fs0
-; RV64IFD-NEXT:    fsgnj.s fa0, fa0, ft0
-; RV64IFD-NEXT:    call __gnu_f2h_ieee@plt
+; RV64IFD-NEXT:    fmv.x.d a0, fa1
+; RV64IFD-NEXT:    fmv.x.w a1, fa0
+; RV64IFD-NEXT:    lui a2, 8
+; RV64IFD-NEXT:    addiw a2, a2, -1
+; RV64IFD-NEXT:    and a1, a1, a2
+; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    slli a2, a2, 63
+; RV64IFD-NEXT:    and a0, a0, a2
+; RV64IFD-NEXT:    srli a0, a0, 48
+; RV64IFD-NEXT:    or a0, a1, a0
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
-; RV64IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32IFZFH-LABEL: fold_demote_h_d:

diff --git a/llvm/test/CodeGen/RISCV/fp16-promote.ll b/llvm/test/CodeGen/RISCV/fp16-promote.ll
index c7b67b28bb00..b2c72505ebd6 100644
--- a/llvm/test/CodeGen/RISCV/fp16-promote.ll
+++ b/llvm/test/CodeGen/RISCV/fp16-promote.ll
@@ -87,16 +87,16 @@ define void @test_fadd(half* %p, half* %q) nounwind {
 ; CHECK-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    mv s0, a1
-; CHECK-NEXT:    mv s1, a0
-; CHECK-NEXT:    lhu a0, 0(a0)
+; CHECK-NEXT:    mv s0, a0
+; CHECK-NEXT:    lhu s1, 0(a0)
+; CHECK-NEXT:    lhu a0, 0(a1)
 ; CHECK-NEXT:    call __gnu_h2f_ieee@plt
 ; CHECK-NEXT:    fmv.s fs0, fa0
-; CHECK-NEXT:    lhu a0, 0(s0)
+; CHECK-NEXT:    mv a0, s1
 ; CHECK-NEXT:    call __gnu_h2f_ieee@plt
-; CHECK-NEXT:    fadd.s fa0, fs0, fa0
+; CHECK-NEXT:    fadd.s fa0, fa0, fs0
 ; CHECK-NEXT:    call __gnu_f2h_ieee@plt
-; CHECK-NEXT:    sh a0, 0(s1)
+; CHECK-NEXT:    sh a0, 0(s0)
 ; CHECK-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -118,16 +118,16 @@ define void @test_fmul(half* %p, half* %q) nounwind {
 ; CHECK-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; CHECK-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    mv s0, a1
-; CHECK-NEXT:    mv s1, a0
-; CHECK-NEXT:    lhu a0, 0(a0)
+; CHECK-NEXT:    mv s0, a0
+; CHECK-NEXT:    lhu s1, 0(a0)
+; CHECK-NEXT:    lhu a0, 0(a1)
 ; CHECK-NEXT:    call __gnu_h2f_ieee@plt
 ; CHECK-NEXT:    fmv.s fs0, fa0
-; CHECK-NEXT:    lhu a0, 0(s0)
+; CHECK-NEXT:    mv a0, s1
 ; CHECK-NEXT:    call __gnu_h2f_ieee@plt
-; CHECK-NEXT:    fmul.s fa0, fs0, fa0
+; CHECK-NEXT:    fmul.s fa0, fa0, fs0
 ; CHECK-NEXT:    call __gnu_f2h_ieee@plt
-; CHECK-NEXT:    sh a0, 0(s1)
+; CHECK-NEXT:    sh a0, 0(s0)
 ; CHECK-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
 ; CHECK-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
index 8f4cce9f77d6..0c789b2bbf1f 100644
--- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
@@ -81,26 +81,13 @@ declare half @llvm.copysign.f16(half, half)
 define half @fcopysign_fneg(half %a, half %b) nounwind {
 ; RV32I-LABEL: fcopysign_fneg:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    not a1, s0
-; RV32I-NEXT:    lui a2, 524288
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    lui a2, 1048568
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    lui a2, 8
 ; RV32I-NEXT:    addi a2, a2, -1
 ; RV32I-NEXT:    and a0, a0, a2
-; RV32I-NEXT:    lui a2, 8
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    call __gnu_f2h_ieee@plt
-; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32IZFH-LABEL: fcopysign_fneg:
@@ -110,26 +97,13 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
 ;
 ; RV64I-LABEL: fcopysign_fneg:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    not a1, s0
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    lui a2, 1048568
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    slli a1, a1, 16
+; RV64I-NEXT:    lui a2, 8
+; RV64I-NEXT:    addiw a2, a2, -1
+; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    call __gnu_f2h_ieee@plt
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcopysign_fneg:

diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
index 10cb5882b5f7..7f7cf044ec28 100644
--- a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
+++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
@@ -17,18 +17,21 @@ define half @half_test(half %a, half %b) nounwind {
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv s2, a1
+; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi s0, a1, -1
-; RV32I-NEXT:    and a0, a0, s0
+; RV32I-NEXT:    addi s1, a1, -1
+; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    and a0, s2, s0
+; RV32I-NEXT:    mv s2, a0
+; RV32I-NEXT:    and a0, s0, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    mv a0, s1
+; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    call __addsf3@plt
+; RV32I-NEXT:    call __gnu_f2h_ieee@plt
+; RV32I-NEXT:    and a0, a0, s1
+; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    call __divsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -46,18 +49,21 @@ define half @half_test(half %a, half %b) nounwind {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s2, a1
+; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw s0, a1, -1
-; RV64I-NEXT:    and a0, a0, s0
+; RV64I-NEXT:    addiw s1, a1, -1
+; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    and a0, s2, s0
+; RV64I-NEXT:    mv s2, a0
+; RV64I-NEXT:    and a0, s0, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __addsf3@plt
+; RV64I-NEXT:    call __gnu_f2h_ieee@plt
+; RV64I-NEXT:    and a0, a0, s1
+; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __divsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt


        

