[llvm] [RISCV][GISel] Add FP calling convention support using FPR and GPR registers. (PR #69138)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 15 20:01:20 PDT 2023


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/69138

This does not include passing values on the stack.
    
Test cases were copied from SelectionDAG and converted to check MIR output. Test cases that require the stack were removed.

This is stacked on #69118 and #69129.
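For reference, the MIR checked by the new irtranslator tests can be reproduced with an invocation like the following, taken from the RUN lines added below (shown here for the ilp32d file):

  llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d -global-isel \
    -stop-after=irtranslator -verify-machineinstrs \
    llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll

This stops compilation after the IRTranslator and prints the MIR that the CHECK lines match.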

From dcb4ed7959e4eaec279a23a09d8f2550439c8dd4 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 15 Oct 2023 11:15:17 -0700
Subject: [PATCH 1/5] [RISCV] Use f64 for LocVT on RV32 when whole f64 is
 passed on the stack. NFC

This removes the special case from unpackF64OnRV32DSoftABI. We can
use the default MemLoc handling.

This also allows us to remove an isRegLoc() check from LowerCall.

This is part of the preparation for supporting FP arguments with GISel.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d7552317fd8bc69..fba9dbabf689219 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16452,13 +16452,13 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
     // cases.
     Register Reg = State.AllocateReg(ArgGPRs);
-    LocVT = MVT::i32;
     if (!Reg) {
       unsigned StackOffset = State.AllocateStack(8, Align(8));
       State.addLoc(
           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
       return false;
     }
+    LocVT = MVT::i32;
     if (!State.AllocateReg(ArgGPRs))
       State.AllocateStack(4, Align(4));
     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -16777,15 +16777,6 @@ static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
   MachineFrameInfo &MFI = MF.getFrameInfo();
   MachineRegisterInfo &RegInfo = MF.getRegInfo();
 
-  if (VA.isMemLoc()) {
-    // f64 is passed on the stack.
-    int FI =
-        MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
-    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
-    return DAG.getLoad(MVT::f64, DL, Chain, FIN,
-                       MachinePointerInfo::getFixedStack(MF, FI));
-  }
-
   assert(VA.isRegLoc() && "Expected register VA assignment");
 
   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
@@ -17300,7 +17291,8 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
     // Handle passing f64 on RV32D with a soft float ABI as a special case.
     bool IsF64OnRV32DSoftABI =
         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
-    if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
+    if (IsF64OnRV32DSoftABI) {
+      assert(VA.isRegLoc() && "Expected register VA assignment");
       SDValue SplitF64 = DAG.getNode(
           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
       SDValue Lo = SplitF64.getValue(0);

From 562cccd068ab8651f56605280499d6d74e50ee94 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 15 Oct 2023 13:32:19 -0700
Subject: [PATCH 2/5] [RISCV] Use separate CCValAssign for both parts of f64
 with ilp32.

Mark any register locations as CustomReg.

This allows us to emit the register or memory access for the high
part more directly. Previously we assumed the offset was 0 and that
a memory access was needed if the low register was X17.

This is another part of supporting FP arguments with GISel.
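
For illustration (not part of this patch), a minimal IR example that should exercise the split register/stack case described above: under ilp32, the seven i32 arguments occupy x10-x16, so the double's low half is passed in x17 and its high half on the stack, which is now described by a CustomReg location for the low half plus a memory location for the high half:

  define double @split_double_reg_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, double %h) nounwind {
    ret double %h
  }

Compiled with, e.g., llc -mtriple=riscv32 -verify-machineinstrs.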
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 94 +++++++++++++--------
 1 file changed, 57 insertions(+), 37 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fba9dbabf689219..91abe256ebd7f48 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16459,9 +16459,16 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
       return false;
     }
     LocVT = MVT::i32;
-    if (!State.AllocateReg(ArgGPRs))
-      State.AllocateStack(4, Align(4));
-    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+    Register HiReg = State.AllocateReg(ArgGPRs);
+    if (HiReg) {
+      State.addLoc(
+          CCValAssign::getCustomReg(ValNo, ValVT, HiReg, LocVT, LocInfo));
+    } else {
+      unsigned StackOffset = State.AllocateStack(4, Align(4));
+      State.addLoc(
+          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+    }
     return false;
   }
 
@@ -16770,7 +16777,9 @@ static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
 }
 
 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
-                                       const CCValAssign &VA, const SDLoc &DL) {
+                                       const CCValAssign &VA,
+                                       const CCValAssign &HiVA,
+                                       const SDLoc &DL) {
   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
          "Unexpected VA");
   MachineFunction &MF = DAG.getMachineFunction();
@@ -16783,16 +16792,17 @@ static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
   SDValue Hi;
-  if (VA.getLocReg() == RISCV::X17) {
+  if (HiVA.isMemLoc()) {
     // Second half of f64 is passed on the stack.
-    int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
+    int FI = MFI.CreateFixedObject(4, HiVA.getLocMemOffset(),
+                                   /*IsImmutable=*/true);
     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
                      MachinePointerInfo::getFixedStack(MF, FI));
   } else {
     // Second half of f64 is passed in another GPR.
     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
-    RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
+    RegInfo.addLiveIn(HiVA.getLocReg(), HiVReg);
     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
   }
   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
@@ -17035,15 +17045,16 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
                      CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
                                                    : RISCV::CC_RISCV);
 
-  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+  for (unsigned i = 0, e = ArgLocs.size(), InsIdx = 0; i != e; ++i, ++InsIdx) {
     CCValAssign &VA = ArgLocs[i];
     SDValue ArgValue;
     // Passing f64 on RV32D with a soft float ABI must be handled as a special
     // case.
-    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
-      ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
-    else if (VA.isRegLoc())
-      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, Ins[i], *this);
+    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
+      assert(VA.needsCustom());
+      ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, ArgLocs[++i], DL);
+    } else if (VA.isRegLoc())
+      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, Ins[InsIdx], *this);
     else
       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
 
@@ -17055,12 +17066,12 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
       // stores are relative to that.
       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                    MachinePointerInfo()));
-      unsigned ArgIndex = Ins[i].OrigArgIndex;
-      unsigned ArgPartOffset = Ins[i].PartOffset;
+      unsigned ArgIndex = Ins[InsIdx].OrigArgIndex;
+      unsigned ArgPartOffset = Ins[InsIdx].PartOffset;
       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
-      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
+      while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
         CCValAssign &PartVA = ArgLocs[i + 1];
-        unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
+        unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
         if (PartVA.getValVT().isScalableVector())
           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
@@ -17068,6 +17079,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                      MachinePointerInfo()));
         ++i;
+        ++InsIdx;
       }
       continue;
     }
@@ -17283,16 +17295,18 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
   SmallVector<SDValue, 8> MemOpChains;
   SDValue StackPtr;
-  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
+  for (unsigned i = 0, j = 0, e = ArgLocs.size(), OutIdx = 0; i != e;
+       ++i, ++OutIdx) {
     CCValAssign &VA = ArgLocs[i];
-    SDValue ArgValue = OutVals[i];
-    ISD::ArgFlagsTy Flags = Outs[i].Flags;
+    SDValue ArgValue = OutVals[OutIdx];
+    ISD::ArgFlagsTy Flags = Outs[OutIdx].Flags;
 
     // Handle passing f64 on RV32D with a soft float ABI as a special case.
     bool IsF64OnRV32DSoftABI =
         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
     if (IsF64OnRV32DSoftABI) {
       assert(VA.isRegLoc() && "Expected register VA assignment");
+      assert(VA.needsCustom());
       SDValue SplitF64 = DAG.getNode(
           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
       SDValue Lo = SplitF64.getValue(0);
@@ -17301,18 +17315,22 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
       Register RegLo = VA.getLocReg();
       RegsToPass.push_back(std::make_pair(RegLo, Lo));
 
-      if (RegLo == RISCV::X17) {
+      // Get the CCValAssign for the Hi part.
+      CCValAssign &HiVA = ArgLocs[++i];
+
+      if (HiVA.isMemLoc()) {
         // Second half of f64 is passed on the stack.
-        // Work out the address of the stack slot.
         if (!StackPtr.getNode())
           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
+        SDValue Address =
+            DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
+                        DAG.getIntPtrConstant(HiVA.getLocMemOffset(), DL));
         // Emit the store.
         MemOpChains.push_back(
-            DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
+            DAG.getStore(Chain, DL, Hi, Address, MachinePointerInfo()));
       } else {
         // Second half of f64 is passed in another GPR.
-        assert(RegLo < RISCV::X31 && "Invalid register pair");
-        Register RegHigh = RegLo + 1;
+        Register RegHigh = HiVA.getLocReg();
         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
       }
       continue;
@@ -17326,7 +17344,7 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
     if (VA.getLocInfo() == CCValAssign::Indirect) {
       // Store the argument in a stack slot and pass its address.
       Align StackAlign =
-          std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
+          std::max(getPrefTypeAlign(Outs[OutIdx].ArgVT, DAG),
                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
       // If the original argument was split (e.g. i128), we need
@@ -17334,16 +17352,16 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
       // Vectors may be partly split to registers and partly to the stack, in
       // which case the base address is partly offset and subsequent stores are
       // relative to that.
-      unsigned ArgIndex = Outs[i].OrigArgIndex;
-      unsigned ArgPartOffset = Outs[i].PartOffset;
+      unsigned ArgIndex = Outs[OutIdx].OrigArgIndex;
+      unsigned ArgPartOffset = Outs[OutIdx].PartOffset;
       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
       // Calculate the total size to store. We don't have access to what we're
       // actually storing other than performing the loop and collecting the
       // info.
       SmallVector<std::pair<SDValue, SDValue>> Parts;
-      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
+      while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
         SDValue PartValue = OutVals[i + 1];
-        unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
+        unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
         EVT PartVT = PartValue.getValueType();
         if (PartVT.isScalableVector())
@@ -17352,6 +17370,7 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
         Parts.push_back(std::make_pair(PartValue, Offset));
         ++i;
+        ++OutIdx;
       }
       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
@@ -17493,7 +17512,8 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, RISCV::CC_RISCV);
 
   // Copy all of the result registers out of their specified physreg.
-  for (auto &VA : RVLocs) {
+  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
+    auto &VA = RVLocs[i];
     // Copy the value out
     SDValue RetValue =
         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
@@ -17502,9 +17522,9 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
     Glue = RetValue.getValue(2);
 
     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
-      assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
-      SDValue RetValue2 =
-          DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
+      assert(VA.needsCustom());
+      SDValue RetValue2 = DAG.getCopyFromReg(Chain, DL, RVLocs[++i].getLocReg(),
+                                             MVT::i32, Glue);
       Chain = RetValue2.getValue(1);
       Glue = RetValue2.getValue(2);
       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
@@ -17567,21 +17587,21 @@ RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
   SmallVector<SDValue, 4> RetOps(1, Chain);
 
   // Copy the result values into the output registers.
-  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
-    SDValue Val = OutVals[i];
+  for (unsigned i = 0, e = RVLocs.size(), OutIdx = 0; i < e; ++i, ++OutIdx) {
+    SDValue Val = OutVals[OutIdx];
     CCValAssign &VA = RVLocs[i];
     assert(VA.isRegLoc() && "Can only return in registers!");
 
     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
       // Handle returning f64 on RV32D with a soft float ABI.
       assert(VA.isRegLoc() && "Expected return via registers");
+      assert(VA.needsCustom());
       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
       SDValue Lo = SplitF64.getValue(0);
       SDValue Hi = SplitF64.getValue(1);
       Register RegLo = VA.getLocReg();
-      assert(RegLo < RISCV::X31 && "Invalid register pair");
-      Register RegHi = RegLo + 1;
+      Register RegHi = RVLocs[++i].getLocReg();
 
       if (STI.isRegisterReservedByUser(RegLo) ||
           STI.isRegisterReservedByUser(RegHi))

From 7eca0449a29d6650809f59bfc47c4890f76b8fff Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 15 Oct 2023 14:59:35 -0700
Subject: [PATCH 3/5] !fixup Use getCustomMem for the high memory location.

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 91abe256ebd7f48..50e48c86b2a5077 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16467,7 +16467,7 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
     } else {
       unsigned StackOffset = State.AllocateStack(4, Align(4));
       State.addLoc(
-          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+          CCValAssign::getCustomMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
     }
     return false;
   }

From af4991622944a46d1133141808105041007b2706 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 15 Oct 2023 17:44:01 -0700
Subject: [PATCH 4/5] [RISCV][GISel] Minor refactoring of
 RISCVCallReturnHandler and RISCVIncomingValueHandler. NFC

Forward assignValueToReg to the base class to make the copy. Add
markPhysRegUsed to contain the differences between call handling
and argument handling. Introduce RISCVFormalArgHandler.

This matches how AArch64, AMDGPU, and X86 are structured.
---
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  | 29 +++++++++++++------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index a362a709329d5df..8e68ca4d474fdf6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -127,9 +127,23 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
 
   void assignValueToReg(Register ValVReg, Register PhysReg,
                         CCValAssign VA) override {
-    // Copy argument received in physical register to desired VReg.
+    markPhysRegUsed(PhysReg);
+    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
+  }
+
+  /// How the physical register gets marked varies between formal
+  /// parameters (it's a basic-block live-in), and a call instruction
+  /// (it's an implicit-def of the BL).
+  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
+};
+
+struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
+  RISCVFormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
+      : RISCVIncomingValueHandler(B, MRI) {}
+
+  void markPhysRegUsed(MCRegister PhysReg) override {
+    MIRBuilder.getMRI()->addLiveIn(PhysReg);
     MIRBuilder.getMBB().addLiveIn(PhysReg);
-    MIRBuilder.buildCopy(ValVReg, PhysReg);
   }
 };
 
@@ -138,14 +152,11 @@ struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
                          MachineInstrBuilder &MIB)
       : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}
 
-  MachineInstrBuilder MIB;
-
-  void assignValueToReg(Register ValVReg, Register PhysReg,
-                        CCValAssign VA) override {
-    // Copy argument received in physical register to desired VReg.
+  void markPhysRegUsed(MCRegister PhysReg) override {
     MIB.addDef(PhysReg, RegState::Implicit);
-    MIRBuilder.buildCopy(ValVReg, PhysReg);
   }
+
+  MachineInstrBuilder MIB;
 };
 
 } // namespace
@@ -239,7 +250,7 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   RISCVIncomingValueAssigner Assigner(
       CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
       /*IsRet=*/false);
-  RISCVIncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
+  RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
 
   return determineAndHandleAssignments(Handler, Assigner, SplitArgInfos,
                                        MIRBuilder, CC, F.isVarArg());

From 755b5e244409774d31a8d89e8152c93f98ba1790 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sun, 15 Oct 2023 18:07:00 -0700
Subject: [PATCH 5/5] [RISCV][GISel] Add FP calling convention support using
 FPR and GPR registers.

This does not include passing values on the stack.

Test cases were copied from SelectionDAG and converted to check MIR
output. Test cases that require the stack were removed.
---
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  | 114 +++++++++++++---
 .../calling-conv-ilp32-ilp32f-common.ll       |  75 +++++++++++
 .../irtranslator/calling-conv-ilp32.ll        |  60 +++++++++
 .../irtranslator/calling-conv-ilp32d.ll       | 122 ++++++++++++++++++
 .../calling-conv-ilp32f-ilp32d-common.ll      | 119 +++++++++++++++++
 .../calling-conv-lp64-lp64f-common.ll         |  62 +++++++++
 .../irtranslator/calling-conv-lp64.ll         |  67 ++++++++++
 7 files changed, 599 insertions(+), 20 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 8e68ca4d474fdf6..b0d692795730df5 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -77,6 +77,44 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
     MIRBuilder.buildCopy(PhysReg, ExtReg);
     MIB.addUse(PhysReg, RegState::Implicit);
   }
+
+  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
+                             ArrayRef<CCValAssign> VAs,
+                             std::function<void()> *Thunk) override {
+    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");
+
+    CCValAssign VA = VAs[0];
+    assert(VA.needsCustom() && "Value doesn't need custom handling");
+
+    // Custom lowering for other types, such as f16, is currently not supported
+    if (VA.getValVT() != MVT::f64)
+      return 0;
+
+    CCValAssign NextVA = VAs[1];
+    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
+    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");
+
+    assert(VA.getValNo() == NextVA.getValNo() &&
+           "Values belong to different arguments");
+
+    assert(VA.isRegLoc() && "Value should be in reg");
+    assert(NextVA.isRegLoc() && "Value should be in reg");
+
+    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
+                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
+    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);
+
+    if (Thunk) {
+      *Thunk = [=]() {
+        assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
+        assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
+      };
+      return 1;
+    }
+    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
+    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);
+    return 1;
+  }
 };
 
 struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
@@ -131,6 +169,29 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
     IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
   }
 
+  unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
+                             ArrayRef<CCValAssign> VAs,
+                             std::function<void()> *Thunk = nullptr) override {
+    const CCValAssign &VALo = VAs[0];
+    const CCValAssign &VAHi = VAs[1];
+
+    assert(VAHi.needsCustom() && "Value doesn't need custom handling");
+
+    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
+           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
+           "unexpected custom value");
+
+    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
+                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
+
+    assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
+    assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
+
+    MIRBuilder.buildMergeLikeInstr(Arg.Regs[0], NewRegs);
+
+    return 1;
+  }
+
   /// How the physical register gets marked varies between formal
   /// parameters (it's a basic-block live-in), and a call instruction
   /// (it's an implicit-def of the BL).
@@ -145,6 +206,10 @@ struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
     MIRBuilder.getMRI()->addLiveIn(PhysReg);
     MIRBuilder.getMBB().addLiveIn(PhysReg);
   }
+
+  virtual void markPhysRegUsed(unsigned PhysReg) {
+    MIRBuilder.getMBB().addLiveIn(PhysReg);
+  }
 };
 
 struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
@@ -164,6 +229,28 @@ struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+static bool isSupportedArgumentType(Type *T) {
+  if (T->isIntegerTy())
+    return true;
+  if (T->isPointerTy())
+    return true;
+  if (T->isFloatingPointTy())
+    return true;
+  return false;
+}
+
+static bool isSupportedReturnType(Type *T) {
+  if (T->isIntegerTy())
+    return true;
+  if (T->isPointerTy())
+    return true;
+  if (T->isFloatingPointTy())
+    return true;
+  if (T->isAggregateType())
+    return true;
+  return false;
+}
+
 bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                        const Value *Val,
                                        ArrayRef<Register> VRegs,
@@ -171,8 +258,7 @@ bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
   if (!Val)
     return true;
 
-  // TODO: Only integer, pointer and aggregate types are supported now.
-  if (!Val->getType()->isIntOrPtrTy() && !Val->getType()->isAggregateType())
+  if (!isSupportedReturnType(Val->getType()))
     return false;
 
   MachineFunction &MF = MIRBuilder.getMF();
@@ -221,11 +307,8 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
 
   // TODO: Support all argument types.
   for (auto &Arg : F.args()) {
-    if (Arg.getType()->isIntegerTy())
-      continue;
-    if (Arg.getType()->isPointerTy())
-      continue;
-    return false;
+    if (!isSupportedArgumentType(Arg.getType()))
+      return false;
   }
 
   MachineFunction &MF = MIRBuilder.getMF();
@@ -264,14 +347,9 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   CallingConv::ID CC = F.getCallingConv();
 
   // TODO: Support all argument types.
-  for (auto &AInfo : Info.OrigArgs) {
-    if (AInfo.Ty->isIntegerTy())
-      continue;
-    if (AInfo.Ty->isPointerTy())
-      continue;
-    if (AInfo.Ty->isFloatingPointTy())
-      continue;
-    return false;
+  for (auto &Arg : Info.OrigArgs) {
+    if (!isSupportedArgumentType(Arg.Ty))
+      return false;
   }
 
   SmallVector<ArgInfo, 32> SplitArgInfos;
@@ -305,13 +383,9 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
 
   MIRBuilder.insertInstr(Call);
 
-  if (Info.OrigRet.Ty->isVoidTy())
+  if (Info.OrigRet.Ty->isVoidTy() || !isSupportedReturnType(Info.OrigRet.Ty))
     return true;
 
-  // TODO: Only integer, pointer and aggregate types are supported now.
-  if (!Info.OrigRet.Ty->isIntOrPtrTy() && !Info.OrigRet.Ty->isAggregateType())
-    return false;
-
   SmallVector<ArgInfo, 4> SplitRetInfos;
   splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll
new file mode 100644
index 000000000000000..8fc77feae2d431d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator \
+; RUN:    -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi ilp32f \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+; This file contains tests that should have identical output for the ilp32
+; and ilp32f ABIs.
+
+; Check that on RV32 ilp32[f], double is passed in a pair of registers. Unlike
+; the convention for varargs, this need not be an aligned pair.
+
+define i32 @callee_double_in_regs(i32 %a, double %b) nounwind {
+  ; RV32I-LABEL: name: callee_double_in_regs
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   liveins: $x10, $x11, $x12
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32I-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+  ; RV32I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[MV]](s64)
+  ; RV32I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV32I-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %b_fptosi = fptosi double %b to i32
+  %1 = add i32 %a, %b_fptosi
+  ret i32 %1
+}
+
+define i32 @caller_double_in_regs() nounwind {
+  ; RV32I-LABEL: name: caller_double_in_regs
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; RV32I-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[UV]](s32)
+  ; RV32I-NEXT:   $x12 = COPY [[UV1]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_in_regs, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_double_in_regs(i32 1, double 2.0)
+  ret i32 %1
+}
+
+define double @callee_small_scalar_ret() nounwind {
+  ; RV32I-LABEL: name: callee_small_scalar_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32I-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ret double 1.0
+}
+
+define i64 @caller_small_scalar_ret() nounwind {
+  ; RV32I-LABEL: name: caller_small_scalar_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, implicit-def $x1, implicit-def $x10, implicit-def $x11
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32I-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  %1 = call double @callee_small_scalar_ret()
+  %2 = bitcast double %1 to i64
+  ret i64 %2
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll
new file mode 100644
index 000000000000000..14eb6af1c4920dd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefix=RV32I %s
+
+; Any tests that would have identical output for some combination of the ilp32*
+; ABIs belong in calling-conv-*-common.ll. This file contains tests that will
+; have different output across those ABIs. i.e. where some arguments would be
+; passed according to the floating point ABI.
+
+define i32 @callee_float_in_regs(i32 %a, float %b) nounwind {
+  ; RV32I-LABEL: name: callee_float_in_regs
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   liveins: $x10, $x11
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY1]](s32)
+  ; RV32I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV32I-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %b_fptosi = fptosi float %b to i32
+  %1 = add i32 %a, %b_fptosi
+  ret i32 %1
+}
+
+define i32 @caller_float_in_regs() nounwind {
+  ; RV32I-LABEL: name: caller_float_in_regs
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; RV32I-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[C1]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_float_in_regs, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_float_in_regs(i32 1, float 2.0)
+  ret i32 %1
+}
+
+define float @callee_tiny_scalar_ret() nounwind {
+  ; RV32I-LABEL: name: callee_tiny_scalar_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; RV32I-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  ret float 1.0
+}
+
+define i32 @caller_tiny_scalar_ret() nounwind {
+  ; RV32I-LABEL: name: caller_tiny_scalar_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_tiny_scalar_ret, implicit-def $x1, implicit-def $x10
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = call float @callee_tiny_scalar_ret()
+  %2 = bitcast float %1 to i32
+  ret i32 %2
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll
new file mode 100644
index 000000000000000..0e68699e3dc44a4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32-ILP32D %s
+
+; This file contains tests that will have differing output for the ilp32/ilp32f
+; and ilp32d ABIs.
+
+define i32 @callee_double_in_fpr(i32 %a, double %b) nounwind {
+  ; RV32-ILP32D-LABEL: name: callee_double_in_fpr
+  ; RV32-ILP32D: bb.1 (%ir-block.0):
+  ; RV32-ILP32D-NEXT:   liveins: $x10, $f10_d
+  ; RV32-ILP32D-NEXT: {{  $}}
+  ; RV32-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32D-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
+  ; RV32-ILP32D-NEXT:   [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY1]](s64)
+  ; RV32-ILP32D-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV32-ILP32D-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32-ILP32D-NEXT:   PseudoRET implicit $x10
+  %b_fptosi = fptosi double %b to i32
+  %1 = add i32 %a, %b_fptosi
+  ret i32 %1
+}
+
+define i32 @caller_double_in_fpr() nounwind {
+  ; RV32-ILP32D-LABEL: name: caller_double_in_fpr
+  ; RV32-ILP32D: bb.1 (%ir-block.0):
+  ; RV32-ILP32D-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32-ILP32D-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; RV32-ILP32D-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32-ILP32D-NEXT:   $f10_d = COPY [[C1]](s64)
+  ; RV32-ILP32D-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_in_fpr, implicit-def $x1, implicit $x10, implicit $f10_d, implicit-def $x10
+  ; RV32-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32D-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32-ILP32D-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_double_in_fpr(i32 1, double 2.0)
+  ret i32 %1
+}
+
+; Must keep define on a single line due to an update_llc_test_checks.py limitation
+define i32 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i) nounwind {
+  ; RV32-ILP32D-LABEL: name: callee_double_in_gpr_exhausted_fprs
+  ; RV32-ILP32D: bb.1 (%ir-block.0):
+  ; RV32-ILP32D-NEXT:   liveins: $x10, $x11, $f10_d, $f11_d, $f12_d, $f13_d, $f14_d, $f15_d, $f16_d, $f17_d
+  ; RV32-ILP32D-NEXT: {{  $}}
+  ; RV32-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
+  ; RV32-ILP32D-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $f11_d
+  ; RV32-ILP32D-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $f12_d
+  ; RV32-ILP32D-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $f13_d
+  ; RV32-ILP32D-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $f14_d
+  ; RV32-ILP32D-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $f15_d
+  ; RV32-ILP32D-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $f16_d
+  ; RV32-ILP32D-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $f17_d
+  ; RV32-ILP32D-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32D-NEXT:   [[COPY9:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-ILP32D-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
+  ; RV32-ILP32D-NEXT:   [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY7]](s64)
+  ; RV32-ILP32D-NEXT:   [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[MV]](s64)
+  ; RV32-ILP32D-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[FPTOSI]], [[FPTOSI1]]
+  ; RV32-ILP32D-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32-ILP32D-NEXT:   PseudoRET implicit $x10
+  %h_fptosi = fptosi double %h to i32
+  %i_fptosi = fptosi double %i to i32
+  %1 = add i32 %h_fptosi, %i_fptosi
+  ret i32 %1
+}
+
+define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
+  ; RV32-ILP32D-LABEL: name: caller_double_in_gpr_exhausted_fprs
+  ; RV32-ILP32D: bb.1 (%ir-block.0):
+  ; RV32-ILP32D-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 3.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C3:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C4:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C5:%[0-9]+]]:_(s64) = G_FCONSTANT double 6.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C6:%[0-9]+]]:_(s64) = G_FCONSTANT double 7.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C7:%[0-9]+]]:_(s64) = G_FCONSTANT double 8.000000e+00
+  ; RV32-ILP32D-NEXT:   [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 9.000000e+00
+  ; RV32-ILP32D-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C8]](s64)
+  ; RV32-ILP32D-NEXT:   $f10_d = COPY [[C]](s64)
+  ; RV32-ILP32D-NEXT:   $f11_d = COPY [[C1]](s64)
+  ; RV32-ILP32D-NEXT:   $f12_d = COPY [[C2]](s64)
+  ; RV32-ILP32D-NEXT:   $f13_d = COPY [[C3]](s64)
+  ; RV32-ILP32D-NEXT:   $f14_d = COPY [[C4]](s64)
+  ; RV32-ILP32D-NEXT:   $f15_d = COPY [[C5]](s64)
+  ; RV32-ILP32D-NEXT:   $f16_d = COPY [[C6]](s64)
+  ; RV32-ILP32D-NEXT:   $f17_d = COPY [[C7]](s64)
+  ; RV32-ILP32D-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32-ILP32D-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32-ILP32D-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_in_gpr_exhausted_fprs, implicit-def $x1, implicit $f10_d, implicit $f11_d, implicit $f12_d, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x10, implicit $x11, implicit-def $x10
+  ; RV32-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32D-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32-ILP32D-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_double_in_gpr_exhausted_fprs(
+      double 1.0, double 2.0, double 3.0, double 4.0, double 5.0, double 6.0,
+      double 7.0, double 8.0, double 9.0)
+  ret i32 %1
+}
+
+define double @callee_double_ret() nounwind {
+  ; RV32-ILP32D-LABEL: name: callee_double_ret
+  ; RV32-ILP32D: bb.1 (%ir-block.0):
+  ; RV32-ILP32D-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV32-ILP32D-NEXT:   $f10_d = COPY [[C]](s64)
+  ; RV32-ILP32D-NEXT:   PseudoRET implicit $f10_d
+  ret double 1.0
+}
+
+define i32 @caller_double_ret() nounwind {
+  ; RV32-ILP32D-LABEL: name: caller_double_ret
+  ; RV32-ILP32D: bb.1 (%ir-block.0):
+  ; RV32-ILP32D-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_ret, implicit-def $x1, implicit-def $f10_d
+  ; RV32-ILP32D-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
+  ; RV32-ILP32D-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; RV32-ILP32D-NEXT:   $x10 = COPY [[TRUNC]](s32)
+  ; RV32-ILP32D-NEXT:   PseudoRET implicit $x10
+  %1 = call double @callee_double_ret()
+  %2 = bitcast double %1 to i64
+  %3 = trunc i64 %2 to i32
+  ret i32 %3
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll
new file mode 100644
index 000000000000000..23a20c0356fd4a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi ilp32f \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32-ILP32FD %s
+; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32-ILP32FD %s
+
+; This file contains tests that should have identical output for the ilp32f
+; and ilp32d ABIs.
+
+define i32 @callee_float_in_fpr(i32 %a, float %b) nounwind {
+  ; RV32-ILP32FD-LABEL: name: callee_float_in_fpr
+  ; RV32-ILP32FD: bb.1 (%ir-block.0):
+  ; RV32-ILP32FD-NEXT:   liveins: $x10, $f10_f
+  ; RV32-ILP32FD-NEXT: {{  $}}
+  ; RV32-ILP32FD-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32FD-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
+  ; RV32-ILP32FD-NEXT:   [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY1]](s32)
+  ; RV32-ILP32FD-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV32-ILP32FD-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoRET implicit $x10
+  %b_fptosi = fptosi float %b to i32
+  %1 = add i32 %a, %b_fptosi
+  ret i32 %1
+}
+
+define i32 @caller_float_in_fpr() nounwind {
+  ; RV32-ILP32FD-LABEL: name: caller_float_in_fpr
+  ; RV32-ILP32FD: bb.1 (%ir-block.0):
+  ; RV32-ILP32FD-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32-ILP32FD-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; RV32-ILP32FD-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32-ILP32FD-NEXT:   $f10_f = COPY [[C1]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoCALL target-flags(riscv-call) @callee_float_in_fpr, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $x10
+  ; RV32-ILP32FD-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32FD-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_float_in_fpr(i32 1, float 2.0)
+  ret i32 %1
+}
+
+; Must keep define on a single line due to an update_llc_test_checks.py limitation
+define i32 @callee_float_in_gpr_exhausted_fprs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) nounwind {
+  ; RV32-ILP32FD-LABEL: name: callee_float_in_gpr_exhausted_fprs
+  ; RV32-ILP32FD: bb.1 (%ir-block.0):
+  ; RV32-ILP32FD-NEXT:   liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f, $f14_f, $f15_f, $f16_f, $f17_f
+  ; RV32-ILP32FD-NEXT: {{  $}}
+  ; RV32-ILP32FD-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+  ; RV32-ILP32FD-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $f11_f
+  ; RV32-ILP32FD-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $f12_f
+  ; RV32-ILP32FD-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $f13_f
+  ; RV32-ILP32FD-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $f14_f
+  ; RV32-ILP32FD-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $f15_f
+  ; RV32-ILP32FD-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $f16_f
+  ; RV32-ILP32FD-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $f17_f
+  ; RV32-ILP32FD-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32FD-NEXT:   [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY7]](s32)
+  ; RV32-ILP32FD-NEXT:   [[FPTOSI1:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY8]](s32)
+  ; RV32-ILP32FD-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[FPTOSI]], [[FPTOSI1]]
+  ; RV32-ILP32FD-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoRET implicit $x10
+  %h_fptosi = fptosi float %h to i32
+  %i_fptosi = fptosi float %i to i32
+  %1 = add i32 %h_fptosi, %i_fptosi
+  ret i32 %1
+}
+
+define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
+  ; RV32-ILP32FD-LABEL: name: caller_float_in_gpr_exhausted_fprs
+  ; RV32-ILP32FD: bb.1 (%ir-block.0):
+  ; RV32-ILP32FD-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 3.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_FCONSTANT float 4.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 6.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C6:%[0-9]+]]:_(s32) = G_FCONSTANT float 7.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C7:%[0-9]+]]:_(s32) = G_FCONSTANT float 8.000000e+00
+  ; RV32-ILP32FD-NEXT:   [[C8:%[0-9]+]]:_(s32) = G_FCONSTANT float 9.000000e+00
+  ; RV32-ILP32FD-NEXT:   $f10_f = COPY [[C]](s32)
+  ; RV32-ILP32FD-NEXT:   $f11_f = COPY [[C1]](s32)
+  ; RV32-ILP32FD-NEXT:   $f12_f = COPY [[C2]](s32)
+  ; RV32-ILP32FD-NEXT:   $f13_f = COPY [[C3]](s32)
+  ; RV32-ILP32FD-NEXT:   $f14_f = COPY [[C4]](s32)
+  ; RV32-ILP32FD-NEXT:   $f15_f = COPY [[C5]](s32)
+  ; RV32-ILP32FD-NEXT:   $f16_f = COPY [[C6]](s32)
+  ; RV32-ILP32FD-NEXT:   $f17_f = COPY [[C7]](s32)
+  ; RV32-ILP32FD-NEXT:   $x10 = COPY [[C8]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoCALL target-flags(riscv-call) @callee_float_in_gpr_exhausted_fprs, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $x10
+  ; RV32-ILP32FD-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-ILP32FD-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_float_in_gpr_exhausted_fprs(
+      float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0,
+      float 7.0, float 8.0, float 9.0)
+  ret i32 %1
+}
+
+define float @callee_float_ret() nounwind {
+  ; RV32-ILP32FD-LABEL: name: callee_float_ret
+  ; RV32-ILP32FD: bb.1 (%ir-block.0):
+  ; RV32-ILP32FD-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; RV32-ILP32FD-NEXT:   $f10_f = COPY [[C]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoRET implicit $f10_f
+  ret float 1.0
+}
+
+define i32 @caller_float_ret() nounwind {
+  ; RV32-ILP32FD-LABEL: name: caller_float_ret
+  ; RV32-ILP32FD: bb.1 (%ir-block.0):
+  ; RV32-ILP32FD-NEXT:   PseudoCALL target-flags(riscv-call) @callee_float_ret, implicit-def $x1, implicit-def $f10_f
+  ; RV32-ILP32FD-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+  ; RV32-ILP32FD-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32-ILP32FD-NEXT:   PseudoRET implicit $x10
+  %1 = call float @callee_float_ret()
+  %2 = bitcast float %1 to i32
+  ret i32 %2
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll
new file mode 100644
index 000000000000000..4d6275db77bc895
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 \
+; RUN:     -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+; This file contains tests that should have identical output for the lp64 and
+; lp64f ABIs.
+
+define i64 @callee_double_in_regs(i64 %a, double %b) nounwind {
+  ; RV64I-LABEL: name: callee_double_in_regs
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   liveins: $x10, $x11
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY1]](s64)
+  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %b_fptosi = fptosi double %b to i64
+  %1 = add i64 %a, %b_fptosi
+  ret i64 %1
+}
+
+define i64 @caller_double_in_regs() nounwind {
+  ; RV64I-LABEL: name: caller_double_in_regs
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
+  ; RV64I-NEXT:   $x10 = COPY [[C]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[C1]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_in_regs, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   $x10 = COPY [[COPY]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call i64 @callee_double_in_regs(i64 1, double 2.0)
+  ret i64 %1
+}
+
+define double @callee_double_ret() nounwind {
+  ; RV64I-LABEL: name: callee_double_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+  ; RV64I-NEXT:   $x10 = COPY [[C]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  ret double 1.0
+}
+
+define i64 @caller_double_ret() nounwind {
+  ; RV64I-LABEL: name: caller_double_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_double_ret, implicit-def $x1, implicit-def $x10
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   $x10 = COPY [[COPY]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call double @callee_double_ret()
+  %2 = bitcast double %1 to i64
+  ret i64 %2
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
new file mode 100644
index 000000000000000..cd1544cebce8aed
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
@@ -0,0 +1,67 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator \
+; RUN:    -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+; Any tests that would have identical output for some combination of the lp64*
+; ABIs belong in calling-conv-*-common.ll. This file contains tests that will
+; have different output across those ABIs. i.e. where some arguments would be
+; passed according to the floating point ABI.
+
+define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
+  ; RV64I-LABEL: name: callee_float_in_regs
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   liveins: $x10, $x11
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+  ; RV64I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[TRUNC]](s32)
+  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %b_fptosi = fptosi float %b to i64
+  %1 = add i64 %a, %b_fptosi
+  ret i64 %1
+}
+
+define i64 @caller_float_in_regs() nounwind {
+  ; RV64I-LABEL: name: caller_float_in_regs
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[C]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_float_in_regs, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   $x10 = COPY [[COPY]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call i64 @callee_float_in_regs(i64 1, float 2.0)
+  ret i64 %1
+}
+
+define float @callee_tiny_scalar_ret() nounwind {
+  ; RV64I-LABEL: name: callee_tiny_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  ret float 1.0
+}
+
+define i64 @caller_tiny_scalar_ret() nounwind {
+  ; RV64I-LABEL: name: caller_tiny_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_tiny_scalar_ret, implicit-def $x1, implicit-def $x10
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; RV64I-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[SEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call float @callee_tiny_scalar_ret()
+  %2 = bitcast float %1 to i32
+  %3 = sext i32 %2 to i64
+  ret i64 %3
+}


