[llvm] 859b785 - [RISCV] Restructure CC_RISCV_FastCC to reduce code duplication. NFC (#107671)

via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 12 08:29:21 PDT 2024


Author: Craig Topper
Date: 2024-09-12T08:29:18-07:00
New Revision: 859b785bb6a1ee20ffca58102d877fc9a4a639e4

URL: https://github.com/llvm/llvm-project/commit/859b785bb6a1ee20ffca58102d877fc9a4a639e4
DIFF: https://github.com/llvm/llvm-project/commit/859b785bb6a1ee20ffca58102d877fc9a4a639e4.diff

LOG: [RISCV] Restructure CC_RISCV_FastCC to reduce code duplication. NFC (#107671)

Move GPR handling closer to the end so we can share it with the indirect
handling for vector. Use a single block for storing any type to the
stack.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVCallingConv.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
index deba85946be53a..fc276d1063281c 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
@@ -521,13 +521,6 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
   const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering();
   RISCVABI::ABI ABI = Subtarget.getTargetABI();
 
-  if (LocVT == MVT::i32 || LocVT == MVT::i64) {
-    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
-      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
-      return false;
-    }
-  }
-
   if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) ||
       (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) {
     static const MCPhysReg FPR16List[] = {
@@ -565,6 +558,8 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
     }
   }
 
+  MVT XLenVT = Subtarget.getXLenVT();
+
   // Check if there is an available GPR before hitting the stack.
   if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) ||
       (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
@@ -572,7 +567,7 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
        Subtarget.hasStdExtZdinx())) {
     if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
       if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
-        LocVT = Subtarget.getXLenVT();
+        LocVT = XLenVT;
         State.addLoc(
             CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
         return false;
@@ -582,58 +577,39 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
     }
   }
 
-  if (LocVT == MVT::f16 || LocVT == MVT::bf16) {
-    int64_t Offset2 = State.AllocateStack(2, Align(2));
-    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
-    return false;
-  }
-
-  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
-    int64_t Offset4 = State.AllocateStack(4, Align(4));
-    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
-    return false;
-  }
-
-  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
-    int64_t Offset5 = State.AllocateStack(8, Align(8));
-    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
-    return false;
-  }
+  ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI);
 
   if (LocVT.isVector()) {
     if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) {
       // Fixed-length vectors are located in the corresponding scalable-vector
       // container types.
-      if (ValVT.isFixedLengthVector())
+      if (LocVT.isFixedLengthVector())
         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
       return false;
     }
 
-    // Try and pass the address via a "fast" GPR.
-    if (MCRegister GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
+    // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we
+    // have a free GPR.
+    if (LocVT.isScalableVector() ||
+        State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) {
       LocInfo = CCValAssign::Indirect;
-      LocVT = Subtarget.getXLenVT();
-      State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
-      return false;
+      LocVT = XLenVT;
     }
+  }
 
-    // Pass scalable vectors indirectly by storing the pointer on the stack.
-    if (ValVT.isScalableVector()) {
-      LocInfo = CCValAssign::Indirect;
-      LocVT = Subtarget.getXLenVT();
-      unsigned XLen = Subtarget.getXLen();
-      int64_t StackOffset = State.AllocateStack(XLen / 8, Align(XLen / 8));
-      State.addLoc(
-          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+  if (LocVT == XLenVT) {
+    if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
+      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
       return false;
     }
+  }
 
-    // Pass fixed-length vectors on the stack.
-    auto StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
-    int64_t StackOffset = State.AllocateStack(ValVT.getStoreSize(), StackAlign);
-    State.addLoc(
-        CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+  if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 ||
+      LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) {
+    Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
+    int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign);
+    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
     return false;
   }
 


        


More information about the llvm-commits mailing list