[llvm] eb89bf8 - [RISCV] Do not use FPR registers for fastcc if zfh/f/d is not specified in the architecture

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 18 10:03:10 PDT 2023


Author: eopXD
Date: 2023-07-18T10:03:04-07:00
New Revision: eb89bf8d0d10e870c638818a7b019b2eda027735

URL: https://github.com/llvm/llvm-project/commit/eb89bf8d0d10e870c638818a7b019b2eda027735
DIFF: https://github.com/llvm/llvm-project/commit/eb89bf8d0d10e870c638818a7b019b2eda027735.diff

LOG: [RISCV] Do not use FPR registers for fastcc if zfh/f/d is not specified in the architecture

Resolves #63917.

Also lets the compiler check for available GPR before hitting the stack.

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D155502

Added: 
    llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9ad9d9354d9da2..e01b261c2f43d6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -15189,7 +15189,10 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
     }
   }
 
-  if (LocVT == MVT::f16) {
+  const RISCVSubtarget &Subtarget = TLI.getSubtarget();
+
+  if (LocVT == MVT::f16 &&
+      (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZfhmin())) {
     static const MCPhysReg FPR16List[] = {
         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
@@ -15201,7 +15204,7 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
     }
   }
 
-  if (LocVT == MVT::f32) {
+  if (LocVT == MVT::f32 && Subtarget.hasStdExtF()) {
     static const MCPhysReg FPR32List[] = {
         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
@@ -15213,7 +15216,7 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
     }
   }
 
-  if (LocVT == MVT::f64) {
+  if (LocVT == MVT::f64 && Subtarget.hasStdExtD()) {
     static const MCPhysReg FPR64List[] = {
         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
@@ -15225,6 +15228,18 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
     }
   }
 
+  // Check if there is an available GPR before hitting the stack.
+  if ((LocVT == MVT::f16 &&
+       (Subtarget.hasStdExtZhinx() || Subtarget.hasStdExtZhinxmin())) ||
+      (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
+      (LocVT == MVT::f64 && Subtarget.is64Bit() &&
+       Subtarget.hasStdExtZdinx())) {
+    if (unsigned Reg = State.AllocateReg(GPRList)) {
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+      return false;
+    }
+  }
+
   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
     unsigned Offset4 = State.AllocateStack(4, Align(4));
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));

diff --git a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
new file mode 100644
index 00000000000000..94cf8becd5236c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
@@ -0,0 +1,218 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=ZHINX32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=ZHINX64 %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=ZFINX32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=ZFINX64 %s
+; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=ZDINX32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=ZDINX64 %s
+
+define half @caller_half(half %x) nounwind {
+; ZHINX32-LABEL: caller_half:
+; ZHINX32:       # %bb.0: # %entry
+; ZHINX32-NEXT:    tail h
+;
+; ZHINX64-LABEL: caller_half:
+; ZHINX64:       # %bb.0: # %entry
+; ZHINX64-NEXT:    tail h
+;
+; ZFINX32-LABEL: caller_half:
+; ZFINX32:       # %bb.0: # %entry
+; ZFINX32-NEXT:    lui a1, 1048560
+; ZFINX32-NEXT:    or a0, a0, a1
+; ZFINX32-NEXT:    tail h
+;
+; ZFINX64-LABEL: caller_half:
+; ZFINX64:       # %bb.0: # %entry
+; ZFINX64-NEXT:    lui a1, 1048560
+; ZFINX64-NEXT:    or a0, a0, a1
+; ZFINX64-NEXT:    tail h
+;
+; ZDINX32-LABEL: caller_half:
+; ZDINX32:       # %bb.0: # %entry
+; ZDINX32-NEXT:    lui a1, 1048560
+; ZDINX32-NEXT:    or a0, a0, a1
+; ZDINX32-NEXT:    tail h
+;
+; ZDINX64-LABEL: caller_half:
+; ZDINX64:       # %bb.0: # %entry
+; ZDINX64-NEXT:    lui a1, 1048560
+; ZDINX64-NEXT:    or a0, a0, a1
+; ZDINX64-NEXT:    tail h
+entry:
+  %0 = tail call fastcc half @h(half %x)
+  ret half %0
+}
+
+define internal fastcc half @h(half %x) nounwind {
+; ZHINX32-LABEL: h:
+; ZHINX32:       # %bb.0:
+; ZHINX32-NEXT:    ret
+;
+; ZHINX64-LABEL: h:
+; ZHINX64:       # %bb.0:
+; ZHINX64-NEXT:    ret
+;
+; ZFINX32-LABEL: h:
+; ZFINX32:       # %bb.0:
+; ZFINX32-NEXT:    lui a1, 1048560
+; ZFINX32-NEXT:    or a0, a0, a1
+; ZFINX32-NEXT:    ret
+;
+; ZFINX64-LABEL: h:
+; ZFINX64:       # %bb.0:
+; ZFINX64-NEXT:    lui a1, 1048560
+; ZFINX64-NEXT:    or a0, a0, a1
+; ZFINX64-NEXT:    ret
+;
+; ZDINX32-LABEL: h:
+; ZDINX32:       # %bb.0:
+; ZDINX32-NEXT:    lui a1, 1048560
+; ZDINX32-NEXT:    or a0, a0, a1
+; ZDINX32-NEXT:    ret
+;
+; ZDINX64-LABEL: h:
+; ZDINX64:       # %bb.0:
+; ZDINX64-NEXT:    lui a1, 1048560
+; ZDINX64-NEXT:    or a0, a0, a1
+; ZDINX64-NEXT:    ret
+  ret half %x
+}
+
+define float @caller_float(float %x) nounwind {
+; ZHINX32-LABEL: caller_float:
+; ZHINX32:       # %bb.0: # %entry
+; ZHINX32-NEXT:    tail f
+;
+; ZHINX64-LABEL: caller_float:
+; ZHINX64:       # %bb.0: # %entry
+; ZHINX64-NEXT:    tail f
+;
+; ZFINX32-LABEL: caller_float:
+; ZFINX32:       # %bb.0: # %entry
+; ZFINX32-NEXT:    tail f
+;
+; ZFINX64-LABEL: caller_float:
+; ZFINX64:       # %bb.0: # %entry
+; ZFINX64-NEXT:    tail f
+;
+; ZDINX32-LABEL: caller_float:
+; ZDINX32:       # %bb.0: # %entry
+; ZDINX32-NEXT:    tail f
+;
+; ZDINX64-LABEL: caller_float:
+; ZDINX64:       # %bb.0: # %entry
+; ZDINX64-NEXT:    tail f
+entry:
+  %0 = tail call fastcc float @f(float %x)
+  ret float %0
+}
+
+define internal fastcc float @f(float %x) nounwind {
+; ZHINX32-LABEL: f:
+; ZHINX32:       # %bb.0: # %entry
+; ZHINX32-NEXT:    ret
+;
+; ZHINX64-LABEL: f:
+; ZHINX64:       # %bb.0: # %entry
+; ZHINX64-NEXT:    ret
+;
+; ZFINX32-LABEL: f:
+; ZFINX32:       # %bb.0: # %entry
+; ZFINX32-NEXT:    ret
+;
+; ZFINX64-LABEL: f:
+; ZFINX64:       # %bb.0: # %entry
+; ZFINX64-NEXT:    ret
+;
+; ZDINX32-LABEL: f:
+; ZDINX32:       # %bb.0: # %entry
+; ZDINX32-NEXT:    ret
+;
+; ZDINX64-LABEL: f:
+; ZDINX64:       # %bb.0: # %entry
+; ZDINX64-NEXT:    ret
+entry:
+  ret float %x
+}
+
+define double @caller_double(double %x) nounwind {
+; ZHINX32-LABEL: caller_double:
+; ZHINX32:       # %bb.0: # %entry
+; ZHINX32-NEXT:    tail d
+;
+; ZHINX64-LABEL: caller_double:
+; ZHINX64:       # %bb.0: # %entry
+; ZHINX64-NEXT:    tail d
+;
+; ZFINX32-LABEL: caller_double:
+; ZFINX32:       # %bb.0: # %entry
+; ZFINX32-NEXT:    tail d
+;
+; ZFINX64-LABEL: caller_double:
+; ZFINX64:       # %bb.0: # %entry
+; ZFINX64-NEXT:    tail d
+;
+; ZDINX32-LABEL: caller_double:
+; ZDINX32:       # %bb.0: # %entry
+; ZDINX32-NEXT:    addi sp, sp, -32
+; ZDINX32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; ZDINX32-NEXT:    sw a0, 16(sp)
+; ZDINX32-NEXT:    sw a1, 20(sp)
+; ZDINX32-NEXT:    lw a0, 16(sp)
+; ZDINX32-NEXT:    lw a1, 20(sp)
+; ZDINX32-NEXT:    sw a0, 0(sp)
+; ZDINX32-NEXT:    sw a1, 4(sp)
+; ZDINX32-NEXT:    call d
+; ZDINX32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; ZDINX32-NEXT:    addi sp, sp, 32
+; ZDINX32-NEXT:    ret
+;
+; ZDINX64-LABEL: caller_double:
+; ZDINX64:       # %bb.0: # %entry
+; ZDINX64-NEXT:    tail d
+entry:
+  %0 = tail call fastcc double @d(double %x)
+  ret double %0
+}
+
+define internal fastcc double @d(double %x) nounwind {
+; ZHINX32-LABEL: d:
+; ZHINX32:       # %bb.0: # %entry
+; ZHINX32-NEXT:    ret
+;
+; ZHINX64-LABEL: d:
+; ZHINX64:       # %bb.0: # %entry
+; ZHINX64-NEXT:    ret
+;
+; ZFINX32-LABEL: d:
+; ZFINX32:       # %bb.0: # %entry
+; ZFINX32-NEXT:    ret
+;
+; ZFINX64-LABEL: d:
+; ZFINX64:       # %bb.0: # %entry
+; ZFINX64-NEXT:    ret
+;
+; ZDINX32-LABEL: d:
+; ZDINX32:       # %bb.0: # %entry
+; ZDINX32-NEXT:    addi sp, sp, -16
+; ZDINX32-NEXT:    lw a0, 16(sp)
+; ZDINX32-NEXT:    lw a1, 20(sp)
+; ZDINX32-NEXT:    sw a0, 8(sp)
+; ZDINX32-NEXT:    sw a1, 12(sp)
+; ZDINX32-NEXT:    lw a0, 8(sp)
+; ZDINX32-NEXT:    lw a1, 12(sp)
+; ZDINX32-NEXT:    addi sp, sp, 16
+; ZDINX32-NEXT:    ret
+;
+; ZDINX64-LABEL: d:
+; ZDINX64:       # %bb.0: # %entry
+; ZDINX64-NEXT:    ret
+entry:
+  ret double %x
+}


        


More information about the llvm-commits mailing list