[llvm] 9c6bee6 - [RISCV][GlobalISel] Add lowerCall for calling convention
Nitin John Raj via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 1 15:59:00 PDT 2023
Author: Nitin John Raj
Date: 2023-06-01T15:54:25-07:00
New Revision: 9c6bee653ca0b777389f5b6ba2e623e94da41ce4
URL: https://github.com/llvm/llvm-project/commit/9c6bee653ca0b777389f5b6ba2e623e94da41ce4
DIFF: https://github.com/llvm/llvm-project/commit/9c6bee653ca0b777389f5b6ba2e623e94da41ce4.diff
LOG: [RISCV][GlobalISel] Add lowerCall for calling convention
This patch implements minimal support for lowering function calls to callees with arguments and/or return values according to the RISC-V calling convention. Integer, pointer and aggregate types are supported.
Feedback is very much appreciated.
Reviewed By: arsenm
Differential Revision: https://reviews.llvm.org/D75023
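As a rough illustration of what the new lowering handles (mirroring the added calls.ll test below), a call passing an i64 argument is now translated by the IRTranslator: on riscv32 the value is unmerged into two s32 pieces and copied into $x10/$x11 ahead of the PseudoCALL, while on riscv64 it travels in a single register. This is only a sketch; the callee name here is hypothetical (the in-tree test uses @void_args_i64).

declare void @takes_i64(i64)

define void @caller() {
entry:
  ; On RV32 this argument is split across $x10/$x11 per the RISC-V
  ; calling convention; on RV64 it fits in $x10 alone.
  call void @takes_i64(i64 42)
  ret void
}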
Added:
llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
Modified:
llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index cc6cf64a9a2db..f8389376b2020 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -133,6 +133,21 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
}
};
+struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
+ RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+ MachineInstrBuilder &MIB)
+ : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}
+
+ MachineInstrBuilder MIB;
+
+ void assignValueToReg(Register ValVReg, Register PhysReg,
+ CCValAssign VA) override {
+ // Copy the return value received in the physical register to the desired VReg.
+ MIB.addDef(PhysReg, RegState::Implicit);
+ MIRBuilder.buildCopy(ValVReg, PhysReg);
+ }
+};
+
} // namespace
RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
@@ -232,5 +247,86 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
CallLoweringInfo &Info) const {
- return false;
+ MachineFunction &MF = MIRBuilder.getMF();
+ const DataLayout &DL = MF.getDataLayout();
+ const Function &F = MF.getFunction();
+ CallingConv::ID CC = F.getCallingConv();
+
+ // TODO: Support vararg functions.
+ if (Info.IsVarArg)
+ return false;
+
+ // TODO: Support all argument types.
+ for (auto &AInfo : Info.OrigArgs) {
+ if (AInfo.Ty->isIntegerTy())
+ continue;
+ if (AInfo.Ty->isPointerTy())
+ continue;
+ if (AInfo.Ty->isFloatingPointTy())
+ continue;
+ return false;
+ }
+
+ SmallVector<ArgInfo, 32> SplitArgInfos;
+ SmallVector<ISD::OutputArg, 8> Outs;
+ unsigned Index = 0;
+ for (auto &AInfo : Info.OrigArgs) {
+ // Handle any required unmerging of split value types from a given VReg into
+ // physical registers. ArgInfo objects are constructed correspondingly and
+ // appended to SplitArgInfos.
+ splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
+
+ ++Index;
+ }
+
+ // TODO: Support tail calls.
+ Info.IsTailCall = false;
+
+ if (!Info.Callee.isReg())
+ Info.Callee.setTargetFlags(RISCVII::MO_CALL);
+
+ MachineInstrBuilder Call =
+ MIRBuilder
+ .buildInstrNoInsert(Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
+ : RISCV::PseudoCALL)
+ .add(Info.Callee);
+
+ RISCVOutgoingValueAssigner ArgAssigner(
+ CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
+ /*IsRet=*/false);
+ RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
+ if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
+ MIRBuilder, CC, Info.IsVarArg))
+ return false;
+
+ MIRBuilder.insertInstr(Call);
+
+ if (Info.OrigRet.Ty->isVoidTy())
+ return true;
+
+ // TODO: Only integer, pointer and aggregate types are supported now.
+ if (!Info.OrigRet.Ty->isIntOrPtrTy() && !Info.OrigRet.Ty->isAggregateType())
+ return false;
+
+ SmallVector<ArgInfo, 4> SplitRetInfos;
+ splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);
+
+ // Assignments should be handled *before* the merging of values takes place.
+ // To ensure this, the insert point is temporarily adjusted to just after the
+ // call instruction.
+ MachineBasicBlock::iterator CallInsertPt = Call;
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(), std::next(CallInsertPt));
+
+ RISCVIncomingValueAssigner RetAssigner(
+ CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
+ /*IsRet=*/true);
+ RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
+ if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
+ MIRBuilder, CC, Info.IsVarArg))
+ return false;
+
+ // Readjust insert point to end of basic block.
+ MIRBuilder.setMBB(MIRBuilder.getMBB());
+
+ return true;
}
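To make the insert-point adjustment in lowerCall concrete: for a call returning i64 on riscv32, the copies out of $x10/$x11 have to be built immediately after the PseudoCALL, and only then merged back into a single s64 value, which is exactly what the new test checks for @i64_noargs. A minimal sketch, with a hypothetical callee name and register names abridged from the test's CHECK lines:

declare i64 @returns_i64()

define void @caller_ret() {
entry:
  ; Expected RV32 translation (abridged):
  ;   PseudoCALL target-flags(riscv-call) @returns_i64, implicit-def $x1, implicit-def $x10, implicit-def $x11
  ;   %lo:_(s32) = COPY $x10
  ;   %hi:_(s32) = COPY $x11
  ;   %r:_(s64)  = G_MERGE_VALUES %lo(s32), %hi(s32)
  %r = call i64 @returns_i64()
  ret void
}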
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
new file mode 100644
index 0000000000000..8375acf5e1ee4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
@@ -0,0 +1,331 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+
+declare void @void_noargs()
+
+define void @test_call_void_noargs() {
+
+ ; RV32I-LABEL: name: test_call_void_noargs
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_noargs, implicit-def $x1
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_void_noargs
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_noargs, implicit-def $x1
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_noargs()
+ ret void
+}
+
+declare void @void_args_i8(i8, i8)
+
+define void @test_call_void_args_i8() {
+
+ ; RV32I-LABEL: name: test_call_void_args_i8
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
+ ; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s8)
+ ; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
+ ; RV32I-NEXT: $x11 = COPY [[ANYEXT1]](s32)
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_void_args_i8
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s8)
+ ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s8)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_args_i8(i8 0, i8 1)
+ ret void
+}
+
+declare void @void_args_i8_zext(i8 zeroext, i8 zeroext)
+
+define void @test_call_void_args_i8_zext() {
+
+ ; RV32I-LABEL: name: test_call_void_args_i8_zext
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s8)
+ ; RV32I-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[C1]](s8)
+ ; RV32I-NEXT: $x10 = COPY [[ZEXT]](s32)
+ ; RV32I-NEXT: $x11 = COPY [[ZEXT1]](s32)
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8_zext, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_void_args_i8_zext
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s8)
+ ; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C1]](s8)
+ ; RV64I-NEXT: $x10 = COPY [[ZEXT]](s64)
+ ; RV64I-NEXT: $x11 = COPY [[ZEXT1]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8_zext, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_args_i8_zext(i8 zeroext 0, i8 zeroext 1)
+ ret void
+}
+
+declare void @void_args_i16_sext(i16 signext, i16 signext)
+
+define void @test_call_void_args_i16_sext() {
+
+ ; RV32I-LABEL: name: test_call_void_args_i16_sext
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[C]](s16)
+ ; RV32I-NEXT: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[C1]](s16)
+ ; RV32I-NEXT: $x10 = COPY [[SEXT]](s32)
+ ; RV32I-NEXT: $x11 = COPY [[SEXT1]](s32)
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i16_sext, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_void_args_i16_sext
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C]](s16)
+ ; RV64I-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[C1]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[SEXT]](s64)
+ ; RV64I-NEXT: $x11 = COPY [[SEXT1]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i16_sext, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_args_i16_sext(i16 signext 0, i16 signext 1)
+ ret void
+}
+
+declare void @void_args_i32(i32, i32)
+
+define void @test_call_void_args_i32() {
+
+ ; RV32I-LABEL: name: test_call_void_args_i32
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV32I-NEXT: $x10 = COPY [[C]](s32)
+ ; RV32I-NEXT: $x11 = COPY [[C1]](s32)
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i32, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_void_args_i32
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+ ; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i32, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_args_i32(i32 0, i32 1)
+ ret void
+}
+
+declare void @void_args_i64(i64, i64)
+
+define void @test_call_void_args_i64() {
+
+ ; RV32I-LABEL: name: test_call_void_args_i64
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV32I-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+ ; RV32I-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+ ; RV32I-NEXT: $x10 = COPY [[UV]](s32)
+ ; RV32I-NEXT: $x11 = COPY [[UV1]](s32)
+ ; RV32I-NEXT: $x12 = COPY [[UV2]](s32)
+ ; RV32I-NEXT: $x13 = COPY [[UV3]](s32)
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_void_args_i64
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64I-NEXT: $x10 = COPY [[C]](s64)
+ ; RV64I-NEXT: $x11 = COPY [[C1]](s64)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_args_i64(i64 0, i64 1)
+ ret void
+}
+
+declare i8 @i8_noargs()
+
+define void @test_call_i8_noargs() {
+
+ ; RV32I-LABEL: name: test_call_i8_noargs
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i8_noargs, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_i8_noargs
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i8_noargs, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %a = call i8 @i8_noargs()
+ ret void
+}
+
+declare i16 @i16_noargs()
+
+define void @test_call_i16_noargs() {
+
+ ; RV32I-LABEL: name: test_call_i16_noargs
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i16_noargs, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_i16_noargs
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i16_noargs, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %a = call i16 @i16_noargs()
+ ret void
+}
+
+declare i32 @i32_noargs()
+
+define void @test_call_i32_noargs() {
+
+ ; RV32I-LABEL: name: test_call_i32_noargs
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i32_noargs, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_i32_noargs
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i32_noargs, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %a = call i32 @i32_noargs()
+ ret void
+}
+
+declare i64 @i64_noargs()
+
+define void @test_call_i64_noargs() {
+
+ ; RV32I-LABEL: name: test_call_i64_noargs
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i64_noargs, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_i64_noargs
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i64_noargs, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %a = call i64 @i64_noargs()
+ ret void
+}
+
+declare ptr @ptr_noargs()
+
+define void @test_call_ptr_noargs() {
+ ; RV32I-LABEL: name: test_call_ptr_noargs
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @ptr_noargs, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_ptr_noargs
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @ptr_noargs, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %a = call ptr @ptr_noargs()
+ ret void
+}
+
+declare [2 x i32] @i32x2_noargs()
+
+define void @test_call_i32x2_noargs() {
+ ; RV32I-LABEL: name: test_call_i32x2_noargs
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i32x2_noargs, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_call_i32x2_noargs
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i32x2_noargs, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64I-NEXT: PseudoRET
+entry:
+ %a = call [2 x i32] @i32x2_noargs()
+ ret void
+}
+
+%struct.Foo = type { i32, i32, i32, i16, i8 }
+@foo = global %struct.Foo { i32 1, i32 2, i32 3, i16 4, i8 5 }, align 4
+
+declare void @void_byval_args(ptr byval(%struct.Foo) %f)
+
+define void @test_void_byval_args() {
+ ; RV32I-LABEL: name: test_void_byval_args
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
+ ; RV32I-NEXT: $x10 = COPY [[GV]](p0)
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_byval_args, implicit-def $x1, implicit $x10
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_void_byval_args
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
+ ; RV64I-NEXT: $x10 = COPY [[GV]](p0)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_byval_args, implicit-def $x1, implicit $x10
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_byval_args(ptr byval(%struct.Foo) @foo)
+ ret void
+}
+
+declare void @void_sret_args(ptr sret(%struct.Foo) %f)
+
+define void @test_void_sret_args() {
+ ; RV32I-LABEL: name: test_void_sret_args
+ ; RV32I: bb.1.entry:
+ ; RV32I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
+ ; RV32I-NEXT: $x10 = COPY [[GV]](p0)
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_sret_args, implicit-def $x1, implicit $x10
+ ; RV32I-NEXT: PseudoRET
+ ; RV64I-LABEL: name: test_void_sret_args
+ ; RV64I: bb.1.entry:
+ ; RV64I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
+ ; RV64I-NEXT: $x10 = COPY [[GV]](p0)
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_sret_args, implicit-def $x1, implicit $x10
+ ; RV64I-NEXT: PseudoRET
+entry:
+ call void @void_sret_args(ptr sret(%struct.Foo) @foo)
+ ret void
+}