[llvm] 66bbefe - [RISCV] Revert Zfhmin related changes that aren't tested and depend on f16 being a legal type.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Thu Dec 16 08:56:02 PST 2021


Author: Craig Topper
Date: 2021-12-16T08:55:28-08:00
New Revision: 66bbefeb1389d9fdfc66920d9cfa9b73783ccfc4

URL: https://github.com/llvm/llvm-project/commit/66bbefeb1389d9fdfc66920d9cfa9b73783ccfc4
DIFF: https://github.com/llvm/llvm-project/commit/66bbefeb1389d9fdfc66920d9cfa9b73783ccfc4.diff

LOG: [RISCV] Revert Zfhmin related changes that aren't tested and depend on f16 being a legal type.

Our Zfhmin support is only MC layer, but these are CodeGen layer
interfaces. If f16 isn't a Legal type for CodeGen with Zfhmin, then
these interfaces should keep their non-Zfh behavior.

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D115822

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/calling-conv-half.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1464cd4f2a56..f33965b50459 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1254,7 +1254,8 @@ bool RISCVTargetLowering::shouldSinkOperands(
 
 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
-  if (VT == MVT::f16 && !Subtarget.hasStdExtZfhmin())
+  // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
+  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
     return false;
   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
     return false;
@@ -1274,9 +1275,10 @@ bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
-  // Use f32 to pass f16 if it is legal and Zfhmin/Zfh is not enabled.
+  // Use f32 to pass f16 if it is legal and Zfh is not enabled.
   // We might still end up using a GPR but that will be decided based on ABI.
-  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfhmin())
+  // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
+  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
     return MVT::f32;
 
   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
@@ -1285,9 +1287,10 @@ MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                            CallingConv::ID CC,
                                                            EVT VT) const {
-  // Use f32 to pass f16 if it is legal and Zfhmin/Zfh is not enabled.
+  // Use f32 to pass f16 if it is legal and Zfh is not enabled.
   // We might still end up using a GPR but that will be decided based on ABI.
-  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfhmin())
+  // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
+  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
     return 1;
 
   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index 3f7515c9523e..c5ec12f77511 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -5,6 +5,8 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s  | FileCheck %s -check-prefix=RV64IF
 ; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32f -verify-machineinstrs < %s  | FileCheck %s -check-prefix=RV32-ILP32F
 ; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -verify-machineinstrs < %s  | FileCheck %s -check-prefix=RV64-LP64F
+; RUN: llc -mtriple=riscv32 -mattr=+f,+experimental-zfhmin -target-abi=ilp32f -verify-machineinstrs < %s  | FileCheck %s -check-prefix=RV32-ILP32ZFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+f,+experimental-zfhmin -target-abi=lp64f -verify-machineinstrs < %s  | FileCheck %s -check-prefix=RV64-LP64ZFHMIN
 
 ; Tests passing half arguments and returns without Zfh.
 ; Covers with and without F extension and ilp32f/ilp64f
@@ -106,6 +108,36 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
 ; RV64-LP64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
 ; RV64-LP64F-NEXT:    ret
+;
+; RV32-ILP32ZFHMIN-LABEL: callee_half_in_regs:
+; RV32-ILP32ZFHMIN:       # %bb.0:
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
+; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32ZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ILP32ZFHMIN-NEXT:    mv s0, a0
+; RV32-ILP32ZFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ILP32ZFHMIN-NEXT:    call __gnu_h2f_ieee@plt
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ILP32ZFHMIN-NEXT:    add a0, s0, a0
+; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32ZFHMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
+; RV32-ILP32ZFHMIN-NEXT:    ret
+;
+; RV64-LP64ZFHMIN-LABEL: callee_half_in_regs:
+; RV64-LP64ZFHMIN:       # %bb.0:
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
+; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-LP64ZFHMIN-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-LP64ZFHMIN-NEXT:    mv s0, a0
+; RV64-LP64ZFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-LP64ZFHMIN-NEXT:    call __gnu_h2f_ieee@plt
+; RV64-LP64ZFHMIN-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64-LP64ZFHMIN-NEXT:    addw a0, s0, a0
+; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-LP64ZFHMIN-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
+; RV64-LP64ZFHMIN-NEXT:    ret
   %b_fptosi = fptosi half %b to i32
   %1 = add i32 %a, %b_fptosi
   ret i32 %1
@@ -181,6 +213,30 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
 ; RV64-LP64F-NEXT:    ret
+;
+; RV32-ILP32ZFHMIN-LABEL: caller_half_in_regs:
+; RV32-ILP32ZFHMIN:       # %bb.0:
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
+; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32ZFHMIN-NEXT:    lui a0, %hi(.LCPI1_0)
+; RV32-ILP32ZFHMIN-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV32-ILP32ZFHMIN-NEXT:    li a0, 1
+; RV32-ILP32ZFHMIN-NEXT:    call callee_half_in_regs@plt
+; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
+; RV32-ILP32ZFHMIN-NEXT:    ret
+;
+; RV64-LP64ZFHMIN-LABEL: caller_half_in_regs:
+; RV64-LP64ZFHMIN:       # %bb.0:
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
+; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-LP64ZFHMIN-NEXT:    lui a0, %hi(.LCPI1_0)
+; RV64-LP64ZFHMIN-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV64-LP64ZFHMIN-NEXT:    li a0, 1
+; RV64-LP64ZFHMIN-NEXT:    call callee_half_in_regs@plt
+; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
+; RV64-LP64ZFHMIN-NEXT:    ret
   %1 = call i32 @callee_half_in_regs(i32 1, half 2.0)
   ret i32 %1
 }
@@ -277,6 +333,36 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
 ; RV64-LP64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
 ; RV64-LP64F-NEXT:    ret
+;
+; RV32-ILP32ZFHMIN-LABEL: callee_half_on_stack:
+; RV32-ILP32ZFHMIN:       # %bb.0:
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
+; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32ZFHMIN-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-ILP32ZFHMIN-NEXT:    mv s0, a7
+; RV32-ILP32ZFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ILP32ZFHMIN-NEXT:    call __gnu_h2f_ieee@plt
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ILP32ZFHMIN-NEXT:    add a0, s0, a0
+; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32ZFHMIN-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
+; RV32-ILP32ZFHMIN-NEXT:    ret
+;
+; RV64-LP64ZFHMIN-LABEL: callee_half_on_stack:
+; RV64-LP64ZFHMIN:       # %bb.0:
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
+; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-LP64ZFHMIN-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-LP64ZFHMIN-NEXT:    mv s0, a7
+; RV64-LP64ZFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-LP64ZFHMIN-NEXT:    call __gnu_h2f_ieee@plt
+; RV64-LP64ZFHMIN-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64-LP64ZFHMIN-NEXT:    addw a0, s0, a0
+; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-LP64ZFHMIN-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
+; RV64-LP64ZFHMIN-NEXT:    ret
   %1 = fptosi half %i to i32
   %2 = add i32 %h, %1
   ret i32 %2
@@ -400,6 +486,44 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
 ; RV64-LP64F-NEXT:    ret
+;
+; RV32-ILP32ZFHMIN-LABEL: caller_half_on_stack:
+; RV32-ILP32ZFHMIN:       # %bb.0:
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
+; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32ZFHMIN-NEXT:    lui a0, %hi(.LCPI3_0)
+; RV32-ILP32ZFHMIN-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
+; RV32-ILP32ZFHMIN-NEXT:    li a0, 1
+; RV32-ILP32ZFHMIN-NEXT:    li a1, 2
+; RV32-ILP32ZFHMIN-NEXT:    li a2, 3
+; RV32-ILP32ZFHMIN-NEXT:    li a3, 4
+; RV32-ILP32ZFHMIN-NEXT:    li a4, 5
+; RV32-ILP32ZFHMIN-NEXT:    li a5, 6
+; RV32-ILP32ZFHMIN-NEXT:    li a6, 7
+; RV32-ILP32ZFHMIN-NEXT:    li a7, 8
+; RV32-ILP32ZFHMIN-NEXT:    call callee_half_on_stack@plt
+; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
+; RV32-ILP32ZFHMIN-NEXT:    ret
+;
+; RV64-LP64ZFHMIN-LABEL: caller_half_on_stack:
+; RV64-LP64ZFHMIN:       # %bb.0:
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
+; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-LP64ZFHMIN-NEXT:    lui a0, %hi(.LCPI3_0)
+; RV64-LP64ZFHMIN-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
+; RV64-LP64ZFHMIN-NEXT:    li a0, 1
+; RV64-LP64ZFHMIN-NEXT:    li a1, 2
+; RV64-LP64ZFHMIN-NEXT:    li a2, 3
+; RV64-LP64ZFHMIN-NEXT:    li a3, 4
+; RV64-LP64ZFHMIN-NEXT:    li a4, 5
+; RV64-LP64ZFHMIN-NEXT:    li a5, 6
+; RV64-LP64ZFHMIN-NEXT:    li a6, 7
+; RV64-LP64ZFHMIN-NEXT:    li a7, 8
+; RV64-LP64ZFHMIN-NEXT:    call callee_half_on_stack@plt
+; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
+; RV64-LP64ZFHMIN-NEXT:    ret
   %1 = call i32 @callee_half_on_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 10.0)
   ret i32 %1
 }
@@ -441,6 +565,18 @@ define half @callee_half_ret() nounwind {
 ; RV64-LP64F-NEXT:    lui a0, %hi(.LCPI4_0)
 ; RV64-LP64F-NEXT:    flw fa0, %lo(.LCPI4_0)(a0)
 ; RV64-LP64F-NEXT:    ret
+;
+; RV32-ILP32ZFHMIN-LABEL: callee_half_ret:
+; RV32-ILP32ZFHMIN:       # %bb.0:
+; RV32-ILP32ZFHMIN-NEXT:    lui a0, %hi(.LCPI4_0)
+; RV32-ILP32ZFHMIN-NEXT:    flw fa0, %lo(.LCPI4_0)(a0)
+; RV32-ILP32ZFHMIN-NEXT:    ret
+;
+; RV64-LP64ZFHMIN-LABEL: callee_half_ret:
+; RV64-LP64ZFHMIN:       # %bb.0:
+; RV64-LP64ZFHMIN-NEXT:    lui a0, %hi(.LCPI4_0)
+; RV64-LP64ZFHMIN-NEXT:    flw fa0, %lo(.LCPI4_0)(a0)
+; RV64-LP64ZFHMIN-NEXT:    ret
   ret half 1.0
 }
 
@@ -520,6 +656,30 @@ define i32 @caller_half_ret() nounwind {
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
 ; RV64-LP64F-NEXT:    ret
+;
+; RV32-ILP32ZFHMIN-LABEL: caller_half_ret:
+; RV32-ILP32ZFHMIN:       # %bb.0:
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, -16
+; RV32-ILP32ZFHMIN-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-ILP32ZFHMIN-NEXT:    call callee_half_ret@plt
+; RV32-ILP32ZFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ILP32ZFHMIN-NEXT:    call __gnu_h2f_ieee@plt
+; RV32-ILP32ZFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ILP32ZFHMIN-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-ILP32ZFHMIN-NEXT:    addi sp, sp, 16
+; RV32-ILP32ZFHMIN-NEXT:    ret
+;
+; RV64-LP64ZFHMIN-LABEL: caller_half_ret:
+; RV64-LP64ZFHMIN:       # %bb.0:
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, -16
+; RV64-LP64ZFHMIN-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-LP64ZFHMIN-NEXT:    call callee_half_ret@plt
+; RV64-LP64ZFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-LP64ZFHMIN-NEXT:    call __gnu_h2f_ieee@plt
+; RV64-LP64ZFHMIN-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64-LP64ZFHMIN-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-LP64ZFHMIN-NEXT:    addi sp, sp, 16
+; RV64-LP64ZFHMIN-NEXT:    ret
   %1 = call half @callee_half_ret()
   %2 = fptosi half %1 to i32
   ret i32 %2


        


More information about the llvm-commits mailing list