[llvm] e103515 - [RISCV][GISel] Support passing arguments through the stack. (#69289)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Oct 18 17:49:03 PDT 2023


Author: Craig Topper
Date: 2023-10-18T17:48:58-07:00
New Revision: e103515cedff16935f8f84e86cf78316cf33c220

URL: https://github.com/llvm/llvm-project/commit/e103515cedff16935f8f84e86cf78316cf33c220
DIFF: https://github.com/llvm/llvm-project/commit/e103515cedff16935f8f84e86cf78316cf33c220.diff

LOG: [RISCV][GISel] Support passing arguments through the stack. (#69289)

This is needed when we run out of argument registers.

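As a rough illustration of when this path fires (my own example, not part
of the patch; assuming the ilp32 ABI with plain integer arguments): the
first eight XLEN-sized argument pieces travel in a0-a7, so a ninth piece
has no register left and must be passed at sp+0 by the caller.

  // Hypothetical example: on rv32 with -global-isel, arguments a..h
  // occupy a0-a7 and i is stored to the stack at the call site.
  int ninth(int a, int b, int c, int d,
            int e, int f, int g, int h, int i);
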
Added: 
    llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index a362a709329d5df..215aa938e5dc484 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -17,6 +17,7 @@
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 
 using namespace llvm;
 
@@ -56,19 +57,38 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
 struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
   RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB)
-      : OutgoingValueHandler(B, MRI), MIB(MIB) {}
-
-  MachineInstrBuilder MIB;
-
+      : OutgoingValueHandler(B, MRI), MIB(MIB),
+        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}
   Register getStackAddress(uint64_t MemSize, int64_t Offset,
                            MachinePointerInfo &MPO,
                            ISD::ArgFlagsTy Flags) override {
-    llvm_unreachable("not implemented");
+    MachineFunction &MF = MIRBuilder.getMF();
+    LLT p0 = LLT::pointer(0, Subtarget.getXLen());
+    LLT sXLen = LLT::scalar(Subtarget.getXLen());
+
+    if (!SPReg)
+      SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);
+
+    auto OffsetReg = MIRBuilder.buildConstant(sXLen, Offset);
+
+    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);
+
+    MPO = MachinePointerInfo::getStack(MF, Offset);
+    return AddrReg.getReg(0);
   }
 
   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                             MachinePointerInfo &MPO, CCValAssign &VA) override {
-    llvm_unreachable("not implemented");
+    MachineFunction &MF = MIRBuilder.getMF();
+    uint64_t LocMemOffset = VA.getLocMemOffset();
+
+    // TODO: Move StackAlignment to subtarget and share with FrameLowering.
+    auto MMO =
+        MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
+                                commonAlignment(Align(16), LocMemOffset));
+
+    Register ExtReg = extendRegister(ValVReg, VA);
+    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
   }
 
   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -77,6 +97,14 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
     MIRBuilder.buildCopy(PhysReg, ExtReg);
     MIB.addUse(PhysReg, RegState::Implicit);
   }
+
+private:
+  MachineInstrBuilder MIB;
+
+  // Cache the SP register vreg if we need it more than once in this call site.
+  Register SPReg;
+
+  const RISCVSubtarget &Subtarget;
 };
 
 struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
@@ -112,17 +140,26 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
 
 struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
   RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
-      : IncomingValueHandler(B, MRI) {}
+      : IncomingValueHandler(B, MRI),
+        Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}
 
   Register getStackAddress(uint64_t MemSize, int64_t Offset,
                            MachinePointerInfo &MPO,
                            ISD::ArgFlagsTy Flags) override {
-    llvm_unreachable("not implemented");
+    MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();
+
+    int FI = MFI.CreateFixedObject(MemSize, Offset, /*Immutable=*/true);
+    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
+    return MIRBuilder.buildFrameIndex(LLT::pointer(0, Subtarget.getXLen()), FI)
+        .getReg(0);
   }
 
   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                             MachinePointerInfo &MPO, CCValAssign &VA) override {
-    llvm_unreachable("not implemented");
+    MachineFunction &MF = MIRBuilder.getMF();
+    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, MemTy,
+                                       inferAlignFromPtrInfo(MF, MPO));
+    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
   }
 
   void assignValueToReg(Register ValVReg, Register PhysReg,
@@ -131,6 +168,9 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
     MIRBuilder.getMBB().addLiveIn(PhysReg);
     MIRBuilder.buildCopy(ValVReg, PhysReg);
   }
+
+private:
+  const RISCVSubtarget &Subtarget;
 };
 
 struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {

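The commonAlignment(Align(16), LocMemOffset) call in the outgoing handler
derives each stack slot's alignment from the 16-byte stack alignment
assumed at call sites (the TODO above notes this constant should
eventually live on the subtarget). A minimal sketch of that computation,
assuming the documented behavior of llvm::commonAlignment (the minimum of
the base alignment and the largest power of two dividing the offset):

  #include <cstdint>

  // Greatest alignment known to hold at (16-byte-aligned SP) + Offset.
  constexpr uint64_t knownAlign(uint64_t BaseAlign, uint64_t Offset) {
    // Largest power of two dividing Offset; Offset 0 keeps the base.
    uint64_t OffsetAlign = Offset == 0 ? BaseAlign : (Offset & -Offset);
    return OffsetAlign < BaseAlign ? OffsetAlign : BaseAlign;
  }

  static_assert(knownAlign(16, 0) == 16, "store into stack, align 16");
  static_assert(knownAlign(16, 4) == 4, "store into stack + 4");

The two static_asserts mirror the MMO annotations in the rv32 test below:
the store at offset 0 is "align 16" while the store at offset 4 carries
only its natural 4-byte alignment.
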
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
new file mode 100644
index 000000000000000..e6837b90e3bb161
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -0,0 +1,259 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi ilp32f \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi ilp32d \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+
+; This file contains tests that should have identical output for the ilp32,
+; ilp32f, and ilp32d ABIs. i.e. where no arguments are passed according to
+; the floating point ABI.
+
+; Check that on RV32, i64 is passed in a pair of registers. Unlike
+; the convention for varargs, this need not be an aligned pair.
+
+define i32 @callee_i64_in_regs(i32 %a, i64 %b) nounwind {
+  ; RV32I-LABEL: name: callee_i64_in_regs
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   liveins: $x10, $x11, $x12
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32I-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+  ; RV32I-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s64)
+  ; RV32I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[TRUNC]]
+  ; RV32I-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %b_trunc = trunc i64 %b to i32
+  %1 = add i32 %a, %b_trunc
+  ret i32 %1
+}
+
+define i32 @caller_i64_in_regs() nounwind {
+  ; RV32I-LABEL: name: caller_i64_in_regs
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+  ; RV32I-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[UV]](s32)
+  ; RV32I-NEXT:   $x12 = COPY [[UV1]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_i64_in_regs, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   $x10 = COPY [[COPY]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_i64_in_regs(i32 1, i64 2)
+  ret i32 %1
+}
+
+; Check that the stack is used once the GPRs are exhausted
+
+define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i64 %g, i32 %h) nounwind {
+  ; RV32I-LABEL: name: callee_many_scalars
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+  ; RV32I-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32I-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32I-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32I-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY3]](s32), [[COPY4]](s32)
+  ; RV32I-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32I-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32I-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32I-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32I-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %fixed-stack.1, align 16)
+  ; RV32I-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY7]](s32), [[LOAD]](s32)
+  ; RV32I-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32I-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %fixed-stack.0)
+  ; RV32I-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
+  ; RV32I-NEXT:   [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s16)
+  ; RV32I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
+  ; RV32I-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[COPY2]]
+  ; RV32I-NEXT:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[MV]](s64), [[MV1]]
+  ; RV32I-NEXT:   [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
+  ; RV32I-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ADD1]]
+  ; RV32I-NEXT:   [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[COPY5]]
+  ; RV32I-NEXT:   [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[COPY6]]
+  ; RV32I-NEXT:   [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[LOAD1]]
+  ; RV32I-NEXT:   $x10 = COPY [[ADD5]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %a_ext = zext i8 %a to i32
+  %b_ext = zext i16 %b to i32
+  %1 = add i32 %a_ext, %b_ext
+  %2 = add i32 %1, %c
+  %3 = icmp eq i64 %d, %g
+  %4 = zext i1 %3 to i32
+  %5 = add i32 %4, %2
+  %6 = add i32 %5, %e
+  %7 = add i32 %6, %f
+  %8 = add i32 %7, %h
+  ret i32 %8
+}
+
+define i32 @caller_many_scalars() nounwind {
+  ; RV32I-LABEL: name: caller_many_scalars
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+  ; RV32I-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; RV32I-NEXT:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; RV32I-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; RV32I-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+  ; RV32I-NEXT:   [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+  ; RV32I-NEXT:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; RV32I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
+  ; RV32I-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
+  ; RV32I-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C3]](s64)
+  ; RV32I-NEXT:   [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C6]](s64)
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x2
+  ; RV32I-NEXT:   [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV32I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s32)
+  ; RV32I-NEXT:   G_STORE [[UV3]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
+  ; RV32I-NEXT:   [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; RV32I-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s32)
+  ; RV32I-NEXT:   G_STORE [[C7]](s32), [[PTR_ADD1]](p0) :: (store (s32) into stack + 4)
+  ; RV32I-NEXT:   $x10 = COPY [[ANYEXT]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[ANYEXT1]](s32)
+  ; RV32I-NEXT:   $x12 = COPY [[C2]](s32)
+  ; RV32I-NEXT:   $x13 = COPY [[UV]](s32)
+  ; RV32I-NEXT:   $x14 = COPY [[UV1]](s32)
+  ; RV32I-NEXT:   $x15 = COPY [[C4]](s32)
+  ; RV32I-NEXT:   $x16 = COPY [[C5]](s32)
+  ; RV32I-NEXT:   $x17 = COPY [[UV2]](s32)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_many_scalars, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   $x10 = COPY [[COPY1]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i32 5, i32 6, i64 7, i32 8)
+  ret i32 %1
+}
+
+; Check return of 2x xlen scalars
+
+define i64 @callee_small_scalar_ret() nounwind {
+  ; RV32I-LABEL: name: callee_small_scalar_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1234567898765
+  ; RV32I-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ret i64 1234567898765
+}
+
+define i32 @caller_small_scalar_ret() nounwind {
+  ; RV32I-LABEL: name: caller_small_scalar_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 987654321234567
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, implicit-def $x1, implicit-def $x10, implicit-def $x11
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32I-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+  ; RV32I-NEXT:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s64), [[MV]]
+  ; RV32I-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
+  ; RV32I-NEXT:   $x10 = COPY [[ZEXT]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = call i64 @callee_small_scalar_ret()
+  %2 = icmp eq i64 987654321234567, %1
+  %3 = zext i1 %2 to i32
+  ret i32 %3
+}
+
+; Check return of 2x xlen structs
+
+%struct.small = type { i32, ptr }
+
+define %struct.small @callee_small_struct_ret() nounwind {
+  ; RV32I-LABEL: name: callee_small_struct_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
+  ; RV32I-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[C1]](p0)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ret %struct.small { i32 1, ptr null }
+}
+
+define i32 @caller_small_struct_ret() nounwind {
+  ; RV32I-LABEL: name: caller_small_struct_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, implicit-def $x1, implicit-def $x10, implicit-def $x11
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32I-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32I-NEXT:   [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p0)
+  ; RV32I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[PTRTOINT]]
+  ; RV32I-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = call %struct.small @callee_small_struct_ret()
+  %2 = extractvalue %struct.small %1, 0
+  %3 = extractvalue %struct.small %1, 1
+  %4 = ptrtoint ptr %3 to i32
+  %5 = add i32 %2, %4
+  ret i32 %5
+}
+
+; Check return of >2x xlen structs
+
+%struct.large = type { i32, i32, i32, i32 }
+
+define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) nounwind {
+  ; RV32I-LABEL: name: callee_large_struct_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   liveins: $x10
+  ; RV32I-NEXT: {{  $}}
+  ; RV32I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32I-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; RV32I-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; RV32I-NEXT:   G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32) into %ir.agg.result)
+  ; RV32I-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; RV32I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s32)
+  ; RV32I-NEXT:   G_STORE [[C1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %ir.b)
+  ; RV32I-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; RV32I-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s32)
+  ; RV32I-NEXT:   G_STORE [[C2]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %ir.c)
+  ; RV32I-NEXT:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; RV32I-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C6]](s32)
+  ; RV32I-NEXT:   G_STORE [[C3]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %ir.d)
+  ; RV32I-NEXT:   PseudoRET
+  store i32 1, ptr %agg.result, align 4
+  %b = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 1
+  store i32 2, ptr %b, align 4
+  %c = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 2
+  store i32 3, ptr %c, align 4
+  %d = getelementptr inbounds %struct.large, ptr %agg.result, i32 0, i32 3
+  store i32 4, ptr %d, align 4
+  ret void
+}
+
+define i32 @caller_large_struct_ret() nounwind {
+  ; RV32I-LABEL: name: caller_large_struct_ret
+  ; RV32I: bb.1 (%ir-block.0):
+  ; RV32I-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+  ; RV32I-NEXT:   $x10 = COPY [[FRAME_INDEX]](p0)
+  ; RV32I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, implicit-def $x1, implicit $x10
+  ; RV32I-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; RV32I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
+  ; RV32I-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s32) from %ir.3)
+  ; RV32I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[LOAD1]]
+  ; RV32I-NEXT:   $x10 = COPY [[ADD]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  %1 = alloca %struct.large
+  call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
+  %2 = load i32, ptr %1
+  %3 = getelementptr inbounds %struct.large, ptr %1, i32 0, i32 3
+  %4 = load i32, ptr %3
+  %5 = add i32 %2, %4
+  ret i32 %5
+}

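To make the register-exhaustion pattern in callee_many_scalars easier to
follow, here is a deliberately simplified model of the assignment order
(my own sketch, not the real CC_RISCV logic; it ignores aggregates, the
varargs alignment rule, and the floating-point ABIs, none of which these
tests exercise):

  #include <cstdint>
  #include <vector>

  struct Loc {
    bool OnStack;
    uint64_t RegOrOffset; // GPR number (x10..x17) or byte offset from SP.
  };

  // The first eight XLEN-sized pieces land in a0-a7 (x10-x17); any
  // remaining pieces go to the stack at increasing offsets.
  std::vector<Loc> assignPieces(unsigned NumPieces, uint64_t XLenBytes) {
    std::vector<Loc> Locs;
    unsigned NextGPR = 10;    // a0 == x10
    uint64_t StackOffset = 0;
    for (unsigned I = 0; I != NumPieces; ++I) {
      if (NextGPR <= 17) {    // a7 == x17
        Locs.push_back({false, NextGPR++});
      } else {
        Locs.push_back({true, StackOffset});
        StackOffset += XLenBytes;
      }
    }
    return Locs;
  }

On rv32 the arguments (i8, i16, i32, i64, i32, i32, i64, i32) split into
ten XLEN-sized pieces, so the model assigns x10-x17 plus stack offsets 0
and 4: exactly the %fixed-stack loads in the callee and the "store into
stack" / "store into stack + 4" operands in the caller checks above.
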
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
new file mode 100644
index 000000000000000..1b8ce7514bb09c8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -0,0 +1,299 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
+; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+; This file contains tests that should have identical output for the lp64,
+; lp64f, and lp64d ABIs. i.e. where no arguments are passed according to
+; the floating point ABI.
+
+; Check that on RV64, i128 is passed in a pair of registers. Unlike
+; the convention for varargs, this need not be an aligned pair.
+
+define i64 @callee_i128_in_regs(i64 %a, i128 %b) nounwind {
+  ; RV64I-LABEL: name: callee_i128_in_regs
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   liveins: $x10, $x11, $x12
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64I-NEXT:   [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY1]](s64), [[COPY2]](s64)
+  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[MV]](s128)
+  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[TRUNC]]
+  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %b_trunc = trunc i128 %b to i64
+  %1 = add i64 %a, %b_trunc
+  ret i64 %1
+}
+
+define i64 @caller_i128_in_regs() nounwind {
+  ; RV64I-LABEL: name: caller_i128_in_regs
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
+  ; RV64I-NEXT:   [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C1]](s128)
+  ; RV64I-NEXT:   $x10 = COPY [[C]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[UV]](s64)
+  ; RV64I-NEXT:   $x12 = COPY [[UV1]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_i128_in_regs, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   $x10 = COPY [[COPY]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
+  ret i64 %1
+}
+
+; Check that the stack is used once the GPRs are exhausted
+
+define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f, i128 %g, i32 %h) nounwind {
+  ; RV64I-LABEL: name: callee_many_scalars
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+  ; RV64I-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64I-NEXT:   [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+  ; RV64I-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64I-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64I-NEXT:   [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY3]](s64), [[COPY4]](s64)
+  ; RV64I-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64I-NEXT:   [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY5]](s64)
+  ; RV64I-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64I-NEXT:   [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY6]](s64)
+  ; RV64I-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64I-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64I-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %fixed-stack.1, align 16)
+  ; RV64I-NEXT:   [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY7]](s64), [[LOAD]](s64)
+  ; RV64I-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64I-NEXT:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s64) from %fixed-stack.0)
+  ; RV64I-NEXT:   [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[LOAD1]](s64)
+  ; RV64I-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
+  ; RV64I-NEXT:   [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s16)
+  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[ZEXT1]]
+  ; RV64I-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[TRUNC2]]
+  ; RV64I-NEXT:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[MV]](s128), [[MV1]]
+  ; RV64I-NEXT:   [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
+  ; RV64I-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ZEXT2]], [[ADD1]]
+  ; RV64I-NEXT:   [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[TRUNC3]]
+  ; RV64I-NEXT:   [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[ADD3]], [[TRUNC4]]
+  ; RV64I-NEXT:   [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[TRUNC5]]
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD5]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %a_ext = zext i8 %a to i32
+  %b_ext = zext i16 %b to i32
+  %1 = add i32 %a_ext, %b_ext
+  %2 = add i32 %1, %c
+  %3 = icmp eq i128 %d, %g
+  %4 = zext i1 %3 to i32
+  %5 = add i32 %4, %2
+  %6 = add i32 %5, %e
+  %7 = add i32 %6, %f
+  %8 = add i32 %7, %h
+  ret i32 %8
+}
+
+define i32 @caller_many_scalars() nounwind {
+  ; RV64I-LABEL: name: caller_many_scalars
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 2
+  ; RV64I-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+  ; RV64I-NEXT:   [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 4
+  ; RV64I-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; RV64I-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+  ; RV64I-NEXT:   [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 7
+  ; RV64I-NEXT:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s8)
+  ; RV64I-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
+  ; RV64I-NEXT:   [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C2]](s32)
+  ; RV64I-NEXT:   [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C3]](s128)
+  ; RV64I-NEXT:   [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C4]](s32)
+  ; RV64I-NEXT:   [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
+  ; RV64I-NEXT:   [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C6]](s128)
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x2
+  ; RV64I-NEXT:   [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; RV64I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
+  ; RV64I-NEXT:   G_STORE [[UV3]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
+  ; RV64I-NEXT:   [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[C7]](s32)
+  ; RV64I-NEXT:   [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+  ; RV64I-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
+  ; RV64I-NEXT:   G_STORE [[ANYEXT5]](s64), [[PTR_ADD1]](p0) :: (store (s64) into stack + 8)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[ANYEXT1]](s64)
+  ; RV64I-NEXT:   $x12 = COPY [[ANYEXT2]](s64)
+  ; RV64I-NEXT:   $x13 = COPY [[UV]](s64)
+  ; RV64I-NEXT:   $x14 = COPY [[UV1]](s64)
+  ; RV64I-NEXT:   $x15 = COPY [[ANYEXT3]](s64)
+  ; RV64I-NEXT:   $x16 = COPY [[ANYEXT4]](s64)
+  ; RV64I-NEXT:   $x17 = COPY [[UV2]](s64)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_many_scalars, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+  ; RV64I-NEXT:   [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT6]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i128 4, i32 5, i32 6, i128 7, i32 8)
+  ret i32 %1
+}
+
+; Check return of 2x xlen scalars
+
+define i128 @callee_small_scalar_ret() nounwind {
+  ; RV64I-LABEL: name: callee_small_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -1
+  ; RV64I-NEXT:   [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C]](s128)
+  ; RV64I-NEXT:   $x10 = COPY [[UV]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[UV1]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ret i128 -1
+}
+
+define i64 @caller_small_scalar_ret() nounwind {
+  ; RV64I-LABEL: name: caller_small_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -2
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, implicit-def $x1, implicit-def $x10, implicit-def $x11
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
+  ; RV64I-NEXT:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[C]](s128), [[MV]]
+  ; RV64I-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1)
+  ; RV64I-NEXT:   $x10 = COPY [[ZEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call i128 @callee_small_scalar_ret()
+  %2 = icmp eq i128 -2, %1
+  %3 = zext i1 %2 to i64
+  ret i64 %3
+}
+
+; Check return of 2x xlen structs
+
+%struct.small = type { i64, ptr }
+
+define %struct.small @callee_small_struct_ret() nounwind {
+  ; RV64I-LABEL: name: callee_small_struct_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
+  ; RV64I-NEXT:   $x10 = COPY [[C]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[C1]](p0)
+  ; RV64I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ret %struct.small { i64 1, ptr null }
+}
+
+define i64 @caller_small_struct_ret() nounwind {
+  ; RV64I-LABEL: name: caller_small_struct_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, implicit-def $x1, implicit-def $x10, implicit-def $x11
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64I-NEXT:   [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY1]](p0)
+  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[PTRTOINT]]
+  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = call %struct.small @callee_small_struct_ret()
+  %2 = extractvalue %struct.small %1, 0
+  %3 = extractvalue %struct.small %1, 1
+  %4 = ptrtoint ptr %3 to i64
+  %5 = add i64 %2, %4
+  ret i64 %5
+}
+
+; Check return of >2x xlen scalars
+
+define i256 @callee_large_scalar_ret() nounwind {
+  ; RV64I-LABEL: name: callee_large_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s256) = G_CONSTANT i256 -123456789
+  ; RV64I-NEXT:   [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[C]](s256)
+  ; RV64I-NEXT:   $x10 = COPY [[UV]](s64)
+  ; RV64I-NEXT:   $x10 = COPY [[UV1]](s64)
+  ; RV64I-NEXT:   $x10 = COPY [[UV2]](s64)
+  ; RV64I-NEXT:   $x10 = COPY [[UV3]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10, implicit $x10, implicit $x10, implicit $x10
+  ret i256 -123456789
+}
+
+define void @caller_large_scalar_ret() nounwind {
+  ; RV64I-LABEL: name: caller_large_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_large_scalar_ret, implicit-def $x1, implicit-def $x10, implicit-def $x10, implicit-def $x10, implicit-def $x10
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[MV:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64)
+  ; RV64I-NEXT:   PseudoRET
+  %1 = call i256 @callee_large_scalar_ret()
+  ret void
+}
+
+; Check return of >2x xlen structs
+
+%struct.large = type { i64, i64, i64, i64 }
+
+define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) nounwind {
+  ; RV64I-LABEL: name: callee_large_struct_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   liveins: $x10
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+  ; RV64I-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+  ; RV64I-NEXT:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; RV64I-NEXT:   G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.agg.result, align 4)
+  ; RV64I-NEXT:   [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+  ; RV64I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C4]](s64)
+  ; RV64I-NEXT:   G_STORE [[C1]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.b, align 4)
+  ; RV64I-NEXT:   [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+  ; RV64I-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C5]](s64)
+  ; RV64I-NEXT:   G_STORE [[C2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %ir.c, align 4)
+  ; RV64I-NEXT:   [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+  ; RV64I-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[COPY]], [[C6]](s64)
+  ; RV64I-NEXT:   G_STORE [[C3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %ir.d, align 4)
+  ; RV64I-NEXT:   PseudoRET
+  store i64 1, ptr %agg.result, align 4
+  %b = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 1
+  store i64 2, ptr %b, align 4
+  %c = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 2
+  store i64 3, ptr %c, align 4
+  %d = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 3
+  store i64 4, ptr %d, align 4
+  ret void
+}
+
+define i64 @caller_large_struct_ret() nounwind {
+  ; RV64I-LABEL: name: caller_large_struct_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+  ; RV64I-NEXT:   $x10 = COPY [[FRAME_INDEX]](p0)
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, implicit-def $x1, implicit $x10
+  ; RV64I-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+  ; RV64I-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
+  ; RV64I-NEXT:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (dereferenceable load (s64) from %ir.3)
+  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD]], [[LOAD1]]
+  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  %1 = alloca %struct.large
+  call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
+  %2 = load i64, ptr %1
+  %3 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 3
+  %4 = load i64, ptr %3
+  %5 = add i64 %2, %4
+  ret i64 %5
+}
