[llvm] 2cf0314 - [LoongArch] Move lp64s out of the unimplemented calling conv list
Weining Lu via llvm-commits
llvm-commits at lists.llvm.org
Tue May 16 06:11:06 PDT 2023
Author: Weining Lu
Date: 2023-05-16T21:09:31+08:00
New Revision: 2cf0314029804ab467517b390f0fa2a48a34dbe7
URL: https://github.com/llvm/llvm-project/commit/2cf0314029804ab467517b390f0fa2a48a34dbe7
DIFF: https://github.com/llvm/llvm-project/commit/2cf0314029804ab467517b390f0fa2a48a34dbe7.diff
LOG: [LoongArch] Move lp64s out of the unimplemented calling conv list
lp64s is the same as lp64d except that floating point arguments and return
values are always passed via GPRs or stack which means `UseGPRForFloat`
is always `true` in `CC_LoongArch` for lp64s.
One motivation of this change is to build linux which uses
`-msoft-float` and `-mabi=lp64s` [1].
[1]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/loongarch/Makefile?h=v6.4-rc1#n49
Reviewed By: xen0n, hev
Differential Revision: https://reviews.llvm.org/D150417
Added:
llvm/test/CodeGen/LoongArch/calling-conv-common.ll
llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll
Modified:
llvm/docs/ReleaseNotes.rst
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
llvm/test/CodeGen/LoongArch/e_flags.ll
Removed:
################################################################################
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 963fa07a3412c..331d26db8111d 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -123,6 +123,8 @@ Changes to the Hexagon Backend
Changes to the LoongArch Backend
--------------------------------
+* The `lp64s` ABI is supported now and has been tested on the Rust bare-metal target.
+
Changes to the MIPS Backend
---------------------------
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 6bf7e1a4b63d1..754e73ba585d0 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -1928,7 +1928,6 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
default:
llvm_unreachable("Unexpected ABI");
case LoongArchABI::ABI_ILP32S:
- case LoongArchABI::ABI_LP64S:
case LoongArchABI::ABI_ILP32F:
case LoongArchABI::ABI_LP64F:
report_fatal_error("Unimplemented ABI");
@@ -1937,6 +1936,8 @@ static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
case LoongArchABI::ABI_LP64D:
UseGPRForFloat = !IsFixed;
break;
+ case LoongArchABI::ABI_LP64S:
+ break;
}
// FPR32 and FPR64 alias each other.
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-common.ll b/llvm/test/CodeGen/LoongArch/calling-conv-common.ll
new file mode 100644
index 0000000000000..69107a72a1180
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-common.ll
@@ -0,0 +1,407 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 --target-abi=lp64s < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+d --target-abi=lp64d < %s | FileCheck %s
+
+;; This file contains tests that should have identical output for all ABIs, i.e.
+;; where no arguments are passed via floating point registers.
+
+;; Check that on LA64, i128 is passed in a pair of GPRs.
+define i64 @callee_i128_in_regs(i64 %a, i128 %b) nounwind {
+; CHECK-LABEL: callee_i128_in_regs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: add.d $a0, $a0, $a1
+; CHECK-NEXT: ret
+ %b_trunc = trunc i128 %b to i64
+ %1 = add i64 %a, %b_trunc
+ ret i64 %1
+}
+
+define i64 @caller_i128_in_regs() nounwind {
+; CHECK-LABEL: caller_i128_in_regs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -16
+; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: ori $a1, $zero, 2
+; CHECK-NEXT: move $a2, $zero
+; CHECK-NEXT: bl %plt(callee_i128_in_regs)
+; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: ret
+ %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
+ ret i64 %1
+}
+
+;; Check that the stack is used once the GPRs are exhausted.
+define i64 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i128 %e, i64 %f, i128 %g, i64 %h) nounwind {
+; CHECK-LABEL: callee_many_scalars:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $t0, $sp, 0
+; CHECK-NEXT: xor $a5, $a5, $t0
+; CHECK-NEXT: xor $a4, $a4, $a7
+; CHECK-NEXT: or $a4, $a4, $a5
+; CHECK-NEXT: bstrpick.d $a1, $a1, 15, 0
+; CHECK-NEXT: andi $a0, $a0, 255
+; CHECK-NEXT: add.d $a0, $a0, $a1
+; CHECK-NEXT: bstrpick.d $a1, $a2, 31, 0
+; CHECK-NEXT: add.d $a0, $a0, $a1
+; CHECK-NEXT: add.d $a0, $a0, $a3
+; CHECK-NEXT: sltui $a1, $a4, 1
+; CHECK-NEXT: add.d $a0, $a1, $a0
+; CHECK-NEXT: add.d $a0, $a0, $a6
+; CHECK-NEXT: ld.d $a1, $sp, 8
+; CHECK-NEXT: add.d $a0, $a0, $a1
+; CHECK-NEXT: ret
+ %a_ext = zext i8 %a to i64
+ %b_ext = zext i16 %b to i64
+ %c_ext = zext i32 %c to i64
+ %1 = add i64 %a_ext, %b_ext
+ %2 = add i64 %1, %c_ext
+ %3 = add i64 %2, %d
+ %4 = icmp eq i128 %e, %g
+ %5 = zext i1 %4 to i64
+ %6 = add i64 %5, %3
+ %7 = add i64 %6, %f
+ %8 = add i64 %7, %h
+ ret i64 %8
+}
+
+define i64 @caller_many_scalars() nounwind {
+; CHECK-LABEL: caller_many_scalars:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -32
+; CHECK-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; CHECK-NEXT: ori $a0, $zero, 8
+; CHECK-NEXT: st.d $a0, $sp, 8
+; CHECK-NEXT: st.d $zero, $sp, 0
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: ori $a1, $zero, 2
+; CHECK-NEXT: ori $a2, $zero, 3
+; CHECK-NEXT: ori $a3, $zero, 4
+; CHECK-NEXT: ori $a4, $zero, 5
+; CHECK-NEXT: ori $a6, $zero, 6
+; CHECK-NEXT: ori $a7, $zero, 7
+; CHECK-NEXT: move $a5, $zero
+; CHECK-NEXT: bl %plt(callee_many_scalars)
+; CHECK-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 32
+; CHECK-NEXT: ret
+ %1 = call i64 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i128 5, i64 6, i128 7, i64 8)
+ ret i64 %1
+}
+
+;; Check that i256 is passed indirectly.
+
+define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
+; CHECK-LABEL: callee_large_scalars:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a2, $a1, 24
+; CHECK-NEXT: ld.d $a3, $a0, 24
+; CHECK-NEXT: xor $a2, $a3, $a2
+; CHECK-NEXT: ld.d $a3, $a1, 8
+; CHECK-NEXT: ld.d $a4, $a0, 8
+; CHECK-NEXT: xor $a3, $a4, $a3
+; CHECK-NEXT: or $a2, $a3, $a2
+; CHECK-NEXT: ld.d $a3, $a1, 16
+; CHECK-NEXT: ld.d $a4, $a0, 16
+; CHECK-NEXT: xor $a3, $a4, $a3
+; CHECK-NEXT: ld.d $a1, $a1, 0
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: xor $a0, $a0, $a1
+; CHECK-NEXT: or $a0, $a0, $a3
+; CHECK-NEXT: or $a0, $a0, $a2
+; CHECK-NEXT: sltui $a0, $a0, 1
+; CHECK-NEXT: ret
+ %1 = icmp eq i256 %a, %b
+ %2 = zext i1 %1 to i64
+ ret i64 %2
+}
+
+define i64 @caller_large_scalars() nounwind {
+; CHECK-LABEL: caller_large_scalars:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -80
+; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
+; CHECK-NEXT: ori $a0, $zero, 2
+; CHECK-NEXT: st.d $a0, $sp, 0
+; CHECK-NEXT: st.d $zero, $sp, 24
+; CHECK-NEXT: st.d $zero, $sp, 16
+; CHECK-NEXT: st.d $zero, $sp, 8
+; CHECK-NEXT: st.d $zero, $sp, 56
+; CHECK-NEXT: st.d $zero, $sp, 48
+; CHECK-NEXT: st.d $zero, $sp, 40
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: st.d $a0, $sp, 32
+; CHECK-NEXT: addi.d $a0, $sp, 32
+; CHECK-NEXT: addi.d $a1, $sp, 0
+; CHECK-NEXT: bl %plt(callee_large_scalars)
+; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 80
+; CHECK-NEXT: ret
+ %1 = call i64 @callee_large_scalars(i256 1, i256 2)
+ ret i64 %1
+}
+
+;; Check that arguments larger than 2*GRLen are handled correctly when their
+;; address is passed on the stack rather than in memory.
+
+;; Must keep define on a single line due to an update_llc_test_checks.py limitation
+define i64 @callee_large_scalars_exhausted_regs(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i256 %h, i64 %i, i256 %j) nounwind {
+; CHECK-LABEL: callee_large_scalars_exhausted_regs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a0, $sp, 8
+; CHECK-NEXT: ld.d $a1, $a0, 24
+; CHECK-NEXT: ld.d $a2, $a7, 24
+; CHECK-NEXT: xor $a1, $a2, $a1
+; CHECK-NEXT: ld.d $a2, $a0, 8
+; CHECK-NEXT: ld.d $a3, $a7, 8
+; CHECK-NEXT: xor $a2, $a3, $a2
+; CHECK-NEXT: or $a1, $a2, $a1
+; CHECK-NEXT: ld.d $a2, $a0, 16
+; CHECK-NEXT: ld.d $a3, $a7, 16
+; CHECK-NEXT: xor $a2, $a3, $a2
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: ld.d $a3, $a7, 0
+; CHECK-NEXT: xor $a0, $a3, $a0
+; CHECK-NEXT: or $a0, $a0, $a2
+; CHECK-NEXT: or $a0, $a0, $a1
+; CHECK-NEXT: sltui $a0, $a0, 1
+; CHECK-NEXT: ret
+ %1 = icmp eq i256 %h, %j
+ %2 = zext i1 %1 to i64
+ ret i64 %2
+}
+
+define i64 @caller_large_scalars_exhausted_regs() nounwind {
+; CHECK-LABEL: caller_large_scalars_exhausted_regs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -96
+; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
+; CHECK-NEXT: addi.d $a0, $sp, 16
+; CHECK-NEXT: st.d $a0, $sp, 8
+; CHECK-NEXT: ori $a0, $zero, 9
+; CHECK-NEXT: st.d $a0, $sp, 0
+; CHECK-NEXT: ori $a0, $zero, 10
+; CHECK-NEXT: st.d $a0, $sp, 16
+; CHECK-NEXT: st.d $zero, $sp, 40
+; CHECK-NEXT: st.d $zero, $sp, 32
+; CHECK-NEXT: st.d $zero, $sp, 24
+; CHECK-NEXT: st.d $zero, $sp, 72
+; CHECK-NEXT: st.d $zero, $sp, 64
+; CHECK-NEXT: st.d $zero, $sp, 56
+; CHECK-NEXT: ori $a0, $zero, 8
+; CHECK-NEXT: st.d $a0, $sp, 48
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: ori $a1, $zero, 2
+; CHECK-NEXT: ori $a2, $zero, 3
+; CHECK-NEXT: ori $a3, $zero, 4
+; CHECK-NEXT: ori $a4, $zero, 5
+; CHECK-NEXT: ori $a5, $zero, 6
+; CHECK-NEXT: ori $a6, $zero, 7
+; CHECK-NEXT: addi.d $a7, $sp, 48
+; CHECK-NEXT: bl %plt(callee_large_scalars_exhausted_regs)
+; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: ret
+ %1 = call i64 @callee_large_scalars_exhausted_regs(
+ i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i256 8, i64 9,
+ i256 10)
+ ret i64 %1
+}
+
+;; Check large struct arguments, which are passed byval
+
+%struct.large = type { i64, i64, i64, i64 }
+
+define i64 @callee_large_struct(ptr byval(%struct.large) align 8 %a) nounwind {
+; CHECK-LABEL: callee_large_struct:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.d $a1, $a0, 24
+; CHECK-NEXT: ld.d $a0, $a0, 0
+; CHECK-NEXT: add.d $a0, $a0, $a1
+; CHECK-NEXT: ret
+ %1 = getelementptr inbounds %struct.large, ptr %a, i64 0, i32 0
+ %2 = getelementptr inbounds %struct.large, ptr %a, i64 0, i32 3
+ %3 = load i64, ptr %1
+ %4 = load i64, ptr %2
+ %5 = add i64 %3, %4
+ ret i64 %5
+}
+
+define i64 @caller_large_struct() nounwind {
+; CHECK-LABEL: caller_large_struct:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -80
+; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: st.d $a0, $sp, 40
+; CHECK-NEXT: st.d $a0, $sp, 8
+; CHECK-NEXT: ori $a0, $zero, 2
+; CHECK-NEXT: st.d $a0, $sp, 48
+; CHECK-NEXT: st.d $a0, $sp, 16
+; CHECK-NEXT: ori $a0, $zero, 3
+; CHECK-NEXT: st.d $a0, $sp, 56
+; CHECK-NEXT: st.d $a0, $sp, 24
+; CHECK-NEXT: ori $a0, $zero, 4
+; CHECK-NEXT: st.d $a0, $sp, 64
+; CHECK-NEXT: st.d $a0, $sp, 32
+; CHECK-NEXT: addi.d $a0, $sp, 8
+; CHECK-NEXT: bl %plt(callee_large_struct)
+; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 80
+; CHECK-NEXT: ret
+ %ls = alloca %struct.large, align 8
+ %a = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 0
+ store i64 1, ptr %a
+ %b = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 1
+ store i64 2, ptr %b
+ %c = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 2
+ store i64 3, ptr %c
+ %d = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 3
+ store i64 4, ptr %d
+ %1 = call i64 @callee_large_struct(ptr byval(%struct.large) align 8 %ls)
+ ret i64 %1
+}
+
+;; Check return scalar which size is 2*GRLen.
+
+define i128 @callee_small_scalar_ret() nounwind {
+; CHECK-LABEL: callee_small_scalar_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.w $a0, $zero, -1
+; CHECK-NEXT: move $a1, $a0
+; CHECK-NEXT: ret
+ ret i128 -1
+}
+
+define i64 @caller_small_scalar_ret() nounwind {
+; CHECK-LABEL: caller_small_scalar_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -16
+; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-NEXT: bl %plt(callee_small_scalar_ret)
+; CHECK-NEXT: addi.w $a2, $zero, -2
+; CHECK-NEXT: xor $a0, $a0, $a2
+; CHECK-NEXT: orn $a0, $a0, $a1
+; CHECK-NEXT: sltui $a0, $a0, 1
+; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: ret
+ %1 = call i128 @callee_small_scalar_ret()
+ %2 = icmp eq i128 -2, %1
+ %3 = zext i1 %2 to i64
+ ret i64 %3
+}
+
+;; Check return struct which size is 2*GRLen.
+
+%struct.small = type { i64, ptr }
+
+define %struct.small @callee_small_struct_ret() nounwind {
+; CHECK-LABEL: callee_small_struct_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: move $a1, $zero
+; CHECK-NEXT: ret
+ ret %struct.small { i64 1, ptr null }
+}
+
+define i64 @caller_small_struct_ret() nounwind {
+; CHECK-LABEL: caller_small_struct_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -16
+; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-NEXT: bl %plt(callee_small_struct_ret)
+; CHECK-NEXT: add.d $a0, $a0, $a1
+; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: ret
+ %1 = call %struct.small @callee_small_struct_ret()
+ %2 = extractvalue %struct.small %1, 0
+ %3 = extractvalue %struct.small %1, 1
+ %4 = ptrtoint ptr %3 to i64
+ %5 = add i64 %2, %4
+ ret i64 %5
+}
+
+;; Check return scalar which size is more than 2*GRLen.
+
+define i256 @callee_large_scalar_ret() nounwind {
+; CHECK-LABEL: callee_large_scalar_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.w $a1, $zero, -1
+; CHECK-NEXT: st.d $a1, $a0, 24
+; CHECK-NEXT: st.d $a1, $a0, 16
+; CHECK-NEXT: st.d $a1, $a0, 8
+; CHECK-NEXT: lu12i.w $a1, -30141
+; CHECK-NEXT: ori $a1, $a1, 747
+; CHECK-NEXT: st.d $a1, $a0, 0
+; CHECK-NEXT: ret
+ ret i256 -123456789
+}
+
+define void @caller_large_scalar_ret() nounwind {
+; CHECK-LABEL: caller_large_scalar_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -48
+; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; CHECK-NEXT: addi.d $a0, $sp, 0
+; CHECK-NEXT: bl %plt(callee_large_scalar_ret)
+; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 48
+; CHECK-NEXT: ret
+ %1 = call i256 @callee_large_scalar_ret()
+ ret void
+}
+
+;; Check return struct which size is more than 2*GRLen.
+
+define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) nounwind {
+; CHECK-LABEL: callee_large_struct_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori $a1, $zero, 4
+; CHECK-NEXT: st.w $a1, $a0, 24
+; CHECK-NEXT: ori $a1, $zero, 3
+; CHECK-NEXT: st.w $a1, $a0, 16
+; CHECK-NEXT: ori $a1, $zero, 2
+; CHECK-NEXT: st.w $a1, $a0, 8
+; CHECK-NEXT: st.w $zero, $a0, 28
+; CHECK-NEXT: st.w $zero, $a0, 20
+; CHECK-NEXT: st.w $zero, $a0, 12
+; CHECK-NEXT: st.w $zero, $a0, 4
+; CHECK-NEXT: ori $a1, $zero, 1
+; CHECK-NEXT: st.w $a1, $a0, 0
+; CHECK-NEXT: ret
+ %a = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 0
+ store i64 1, ptr %a, align 4
+ %b = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 1
+ store i64 2, ptr %b, align 4
+ %c = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 2
+ store i64 3, ptr %c, align 4
+ %d = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 3
+ store i64 4, ptr %d, align 4
+ ret void
+}
+
+define i64 @caller_large_struct_ret() nounwind {
+; CHECK-LABEL: caller_large_struct_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -48
+; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; CHECK-NEXT: addi.d $a0, $sp, 8
+; CHECK-NEXT: bl %plt(callee_large_struct_ret)
+; CHECK-NEXT: ld.d $a0, $sp, 32
+; CHECK-NEXT: ld.d $a1, $sp, 8
+; CHECK-NEXT: add.d $a0, $a1, $a0
+; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 48
+; CHECK-NEXT: ret
+ %1 = alloca %struct.large
+ call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
+ %2 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 0
+ %3 = load i64, ptr %2
+ %4 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 3
+ %5 = load i64, ptr %4
+ %6 = add i64 %3, %5
+ ret i64 %6
+}
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
index ae2ce72914399..ceb38876c384a 100644
--- a/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-lp64d.ll
@@ -2,406 +2,7 @@
; RUN: llc --mtriple=loongarch64 --mattr=+d --target-abi=lp64d < %s \
; RUN: | FileCheck %s
-;; Check that on LA64, i128 is passed in a pair of GPRs.
-define i64 @callee_i128_in_regs(i64 %a, i128 %b) nounwind {
-; CHECK-LABEL: callee_i128_in_regs:
-; CHECK: # %bb.0:
-; CHECK-NEXT: add.d $a0, $a0, $a1
-; CHECK-NEXT: ret
- %b_trunc = trunc i128 %b to i64
- %1 = add i64 %a, %b_trunc
- ret i64 %1
-}
-
-define i64 @caller_i128_in_regs() nounwind {
-; CHECK-LABEL: caller_i128_in_regs:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT: ori $a0, $zero, 1
-; CHECK-NEXT: ori $a1, $zero, 2
-; CHECK-NEXT: move $a2, $zero
-; CHECK-NEXT: bl %plt(callee_i128_in_regs)
-; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 16
-; CHECK-NEXT: ret
- %1 = call i64 @callee_i128_in_regs(i64 1, i128 2)
- ret i64 %1
-}
-
-;; Check that the stack is used once the GPRs are exhausted.
-define i64 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i128 %e, i64 %f, i128 %g, i64 %h) nounwind {
-; CHECK-LABEL: callee_many_scalars:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ld.d $t0, $sp, 0
-; CHECK-NEXT: xor $a5, $a5, $t0
-; CHECK-NEXT: xor $a4, $a4, $a7
-; CHECK-NEXT: or $a4, $a4, $a5
-; CHECK-NEXT: bstrpick.d $a1, $a1, 15, 0
-; CHECK-NEXT: andi $a0, $a0, 255
-; CHECK-NEXT: add.d $a0, $a0, $a1
-; CHECK-NEXT: bstrpick.d $a1, $a2, 31, 0
-; CHECK-NEXT: add.d $a0, $a0, $a1
-; CHECK-NEXT: add.d $a0, $a0, $a3
-; CHECK-NEXT: sltui $a1, $a4, 1
-; CHECK-NEXT: add.d $a0, $a1, $a0
-; CHECK-NEXT: add.d $a0, $a0, $a6
-; CHECK-NEXT: ld.d $a1, $sp, 8
-; CHECK-NEXT: add.d $a0, $a0, $a1
-; CHECK-NEXT: ret
- %a_ext = zext i8 %a to i64
- %b_ext = zext i16 %b to i64
- %c_ext = zext i32 %c to i64
- %1 = add i64 %a_ext, %b_ext
- %2 = add i64 %1, %c_ext
- %3 = add i64 %2, %d
- %4 = icmp eq i128 %e, %g
- %5 = zext i1 %4 to i64
- %6 = add i64 %5, %3
- %7 = add i64 %6, %f
- %8 = add i64 %7, %h
- ret i64 %8
-}
-
-define i64 @caller_many_scalars() nounwind {
-; CHECK-LABEL: caller_many_scalars:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -32
-; CHECK-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
-; CHECK-NEXT: ori $a0, $zero, 8
-; CHECK-NEXT: st.d $a0, $sp, 8
-; CHECK-NEXT: st.d $zero, $sp, 0
-; CHECK-NEXT: ori $a0, $zero, 1
-; CHECK-NEXT: ori $a1, $zero, 2
-; CHECK-NEXT: ori $a2, $zero, 3
-; CHECK-NEXT: ori $a3, $zero, 4
-; CHECK-NEXT: ori $a4, $zero, 5
-; CHECK-NEXT: ori $a6, $zero, 6
-; CHECK-NEXT: ori $a7, $zero, 7
-; CHECK-NEXT: move $a5, $zero
-; CHECK-NEXT: bl %plt(callee_many_scalars)
-; CHECK-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 32
-; CHECK-NEXT: ret
- %1 = call i64 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i128 5, i64 6, i128 7, i64 8)
- ret i64 %1
-}
-
-;; Check that i256 is passed indirectly.
-
-define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
-; CHECK-LABEL: callee_large_scalars:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ld.d $a2, $a1, 24
-; CHECK-NEXT: ld.d $a3, $a0, 24
-; CHECK-NEXT: xor $a2, $a3, $a2
-; CHECK-NEXT: ld.d $a3, $a1, 8
-; CHECK-NEXT: ld.d $a4, $a0, 8
-; CHECK-NEXT: xor $a3, $a4, $a3
-; CHECK-NEXT: or $a2, $a3, $a2
-; CHECK-NEXT: ld.d $a3, $a1, 16
-; CHECK-NEXT: ld.d $a4, $a0, 16
-; CHECK-NEXT: xor $a3, $a4, $a3
-; CHECK-NEXT: ld.d $a1, $a1, 0
-; CHECK-NEXT: ld.d $a0, $a0, 0
-; CHECK-NEXT: xor $a0, $a0, $a1
-; CHECK-NEXT: or $a0, $a0, $a3
-; CHECK-NEXT: or $a0, $a0, $a2
-; CHECK-NEXT: sltui $a0, $a0, 1
-; CHECK-NEXT: ret
- %1 = icmp eq i256 %a, %b
- %2 = zext i1 %1 to i64
- ret i64 %2
-}
-
-define i64 @caller_large_scalars() nounwind {
-; CHECK-LABEL: caller_large_scalars:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -80
-; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
-; CHECK-NEXT: ori $a0, $zero, 2
-; CHECK-NEXT: st.d $a0, $sp, 0
-; CHECK-NEXT: st.d $zero, $sp, 24
-; CHECK-NEXT: st.d $zero, $sp, 16
-; CHECK-NEXT: st.d $zero, $sp, 8
-; CHECK-NEXT: st.d $zero, $sp, 56
-; CHECK-NEXT: st.d $zero, $sp, 48
-; CHECK-NEXT: st.d $zero, $sp, 40
-; CHECK-NEXT: ori $a0, $zero, 1
-; CHECK-NEXT: st.d $a0, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 32
-; CHECK-NEXT: addi.d $a1, $sp, 0
-; CHECK-NEXT: bl %plt(callee_large_scalars)
-; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 80
-; CHECK-NEXT: ret
- %1 = call i64 @callee_large_scalars(i256 1, i256 2)
- ret i64 %1
-}
-
-;; Check that arguments larger than 2*GRLen are handled correctly when their
-;; address is passed on the stack rather than in memory.
-
-;; Must keep define on a single line due to an update_llc_test_checks.py limitation
-define i64 @callee_large_scalars_exhausted_regs(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i256 %h, i64 %i, i256 %j) nounwind {
-; CHECK-LABEL: callee_large_scalars_exhausted_regs:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ld.d $a0, $sp, 8
-; CHECK-NEXT: ld.d $a1, $a0, 24
-; CHECK-NEXT: ld.d $a2, $a7, 24
-; CHECK-NEXT: xor $a1, $a2, $a1
-; CHECK-NEXT: ld.d $a2, $a0, 8
-; CHECK-NEXT: ld.d $a3, $a7, 8
-; CHECK-NEXT: xor $a2, $a3, $a2
-; CHECK-NEXT: or $a1, $a2, $a1
-; CHECK-NEXT: ld.d $a2, $a0, 16
-; CHECK-NEXT: ld.d $a3, $a7, 16
-; CHECK-NEXT: xor $a2, $a3, $a2
-; CHECK-NEXT: ld.d $a0, $a0, 0
-; CHECK-NEXT: ld.d $a3, $a7, 0
-; CHECK-NEXT: xor $a0, $a3, $a0
-; CHECK-NEXT: or $a0, $a0, $a2
-; CHECK-NEXT: or $a0, $a0, $a1
-; CHECK-NEXT: sltui $a0, $a0, 1
-; CHECK-NEXT: ret
- %1 = icmp eq i256 %h, %j
- %2 = zext i1 %1 to i64
- ret i64 %2
-}
-
-define i64 @caller_large_scalars_exhausted_regs() nounwind {
-; CHECK-LABEL: caller_large_scalars_exhausted_regs:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $a0, $sp, 16
-; CHECK-NEXT: st.d $a0, $sp, 8
-; CHECK-NEXT: ori $a0, $zero, 9
-; CHECK-NEXT: st.d $a0, $sp, 0
-; CHECK-NEXT: ori $a0, $zero, 10
-; CHECK-NEXT: st.d $a0, $sp, 16
-; CHECK-NEXT: st.d $zero, $sp, 40
-; CHECK-NEXT: st.d $zero, $sp, 32
-; CHECK-NEXT: st.d $zero, $sp, 24
-; CHECK-NEXT: st.d $zero, $sp, 72
-; CHECK-NEXT: st.d $zero, $sp, 64
-; CHECK-NEXT: st.d $zero, $sp, 56
-; CHECK-NEXT: ori $a0, $zero, 8
-; CHECK-NEXT: st.d $a0, $sp, 48
-; CHECK-NEXT: ori $a0, $zero, 1
-; CHECK-NEXT: ori $a1, $zero, 2
-; CHECK-NEXT: ori $a2, $zero, 3
-; CHECK-NEXT: ori $a3, $zero, 4
-; CHECK-NEXT: ori $a4, $zero, 5
-; CHECK-NEXT: ori $a5, $zero, 6
-; CHECK-NEXT: ori $a6, $zero, 7
-; CHECK-NEXT: addi.d $a7, $sp, 48
-; CHECK-NEXT: bl %plt(callee_large_scalars_exhausted_regs)
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
-; CHECK-NEXT: ret
- %1 = call i64 @callee_large_scalars_exhausted_regs(
- i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i256 8, i64 9,
- i256 10)
- ret i64 %1
-}
-
-;; Check large struct arguments, which are passed byval
-
-%struct.large = type { i64, i64, i64, i64 }
-
-define i64 @callee_large_struct(ptr byval(%struct.large) align 8 %a) nounwind {
-; CHECK-LABEL: callee_large_struct:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ld.d $a1, $a0, 24
-; CHECK-NEXT: ld.d $a0, $a0, 0
-; CHECK-NEXT: add.d $a0, $a0, $a1
-; CHECK-NEXT: ret
- %1 = getelementptr inbounds %struct.large, ptr %a, i64 0, i32 0
- %2 = getelementptr inbounds %struct.large, ptr %a, i64 0, i32 3
- %3 = load i64, ptr %1
- %4 = load i64, ptr %2
- %5 = add i64 %3, %4
- ret i64 %5
-}
-
-define i64 @caller_large_struct() nounwind {
-; CHECK-LABEL: caller_large_struct:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -80
-; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
-; CHECK-NEXT: ori $a0, $zero, 1
-; CHECK-NEXT: st.d $a0, $sp, 40
-; CHECK-NEXT: st.d $a0, $sp, 8
-; CHECK-NEXT: ori $a0, $zero, 2
-; CHECK-NEXT: st.d $a0, $sp, 48
-; CHECK-NEXT: st.d $a0, $sp, 16
-; CHECK-NEXT: ori $a0, $zero, 3
-; CHECK-NEXT: st.d $a0, $sp, 56
-; CHECK-NEXT: st.d $a0, $sp, 24
-; CHECK-NEXT: ori $a0, $zero, 4
-; CHECK-NEXT: st.d $a0, $sp, 64
-; CHECK-NEXT: st.d $a0, $sp, 32
-; CHECK-NEXT: addi.d $a0, $sp, 8
-; CHECK-NEXT: bl %plt(callee_large_struct)
-; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 80
-; CHECK-NEXT: ret
- %ls = alloca %struct.large, align 8
- %a = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 0
- store i64 1, ptr %a
- %b = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 1
- store i64 2, ptr %b
- %c = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 2
- store i64 3, ptr %c
- %d = getelementptr inbounds %struct.large, ptr %ls, i64 0, i32 3
- store i64 4, ptr %d
- %1 = call i64 @callee_large_struct(ptr byval(%struct.large) align 8 %ls)
- ret i64 %1
-}
-
-;; Check return scalar which size is 2*GRLen.
-
-define i128 @callee_small_scalar_ret() nounwind {
-; CHECK-LABEL: callee_small_scalar_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.w $a0, $zero, -1
-; CHECK-NEXT: move $a1, $a0
-; CHECK-NEXT: ret
- ret i128 -1
-}
-
-define i64 @caller_small_scalar_ret() nounwind {
-; CHECK-LABEL: caller_small_scalar_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT: bl %plt(callee_small_scalar_ret)
-; CHECK-NEXT: addi.w $a2, $zero, -2
-; CHECK-NEXT: xor $a0, $a0, $a2
-; CHECK-NEXT: orn $a0, $a0, $a1
-; CHECK-NEXT: sltui $a0, $a0, 1
-; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 16
-; CHECK-NEXT: ret
- %1 = call i128 @callee_small_scalar_ret()
- %2 = icmp eq i128 -2, %1
- %3 = zext i1 %2 to i64
- ret i64 %3
-}
-
-;; Check return struct which size is 2*GRLen.
-
-%struct.small = type { i64, ptr }
-
-define %struct.small @callee_small_struct_ret() nounwind {
-; CHECK-LABEL: callee_small_struct_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ori $a0, $zero, 1
-; CHECK-NEXT: move $a1, $zero
-; CHECK-NEXT: ret
- ret %struct.small { i64 1, ptr null }
-}
-
-define i64 @caller_small_struct_ret() nounwind {
-; CHECK-LABEL: caller_small_struct_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT: bl %plt(callee_small_struct_ret)
-; CHECK-NEXT: add.d $a0, $a0, $a1
-; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 16
-; CHECK-NEXT: ret
- %1 = call %struct.small @callee_small_struct_ret()
- %2 = extractvalue %struct.small %1, 0
- %3 = extractvalue %struct.small %1, 1
- %4 = ptrtoint ptr %3 to i64
- %5 = add i64 %2, %4
- ret i64 %5
-}
-
-;; Check return scalar which size is more than 2*GRLen.
-
-define i256 @callee_large_scalar_ret() nounwind {
-; CHECK-LABEL: callee_large_scalar_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.w $a1, $zero, -1
-; CHECK-NEXT: st.d $a1, $a0, 24
-; CHECK-NEXT: st.d $a1, $a0, 16
-; CHECK-NEXT: st.d $a1, $a0, 8
-; CHECK-NEXT: lu12i.w $a1, -30141
-; CHECK-NEXT: ori $a1, $a1, 747
-; CHECK-NEXT: st.d $a1, $a0, 0
-; CHECK-NEXT: ret
- ret i256 -123456789
-}
-
-define void @caller_large_scalar_ret() nounwind {
-; CHECK-LABEL: caller_large_scalar_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -48
-; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $a0, $sp, 0
-; CHECK-NEXT: bl %plt(callee_large_scalar_ret)
-; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 48
-; CHECK-NEXT: ret
- %1 = call i256 @callee_large_scalar_ret()
- ret void
-}
-
-;; Check return struct which size is more than 2*GRLen.
-
-define void @callee_large_struct_ret(ptr noalias sret(%struct.large) %agg.result) nounwind {
-; CHECK-LABEL: callee_large_struct_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: ori $a1, $zero, 4
-; CHECK-NEXT: st.w $a1, $a0, 24
-; CHECK-NEXT: ori $a1, $zero, 3
-; CHECK-NEXT: st.w $a1, $a0, 16
-; CHECK-NEXT: ori $a1, $zero, 2
-; CHECK-NEXT: st.w $a1, $a0, 8
-; CHECK-NEXT: st.w $zero, $a0, 28
-; CHECK-NEXT: st.w $zero, $a0, 20
-; CHECK-NEXT: st.w $zero, $a0, 12
-; CHECK-NEXT: st.w $zero, $a0, 4
-; CHECK-NEXT: ori $a1, $zero, 1
-; CHECK-NEXT: st.w $a1, $a0, 0
-; CHECK-NEXT: ret
- %a = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 0
- store i64 1, ptr %a, align 4
- %b = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 1
- store i64 2, ptr %b, align 4
- %c = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 2
- store i64 3, ptr %c, align 4
- %d = getelementptr inbounds %struct.large, ptr %agg.result, i64 0, i32 3
- store i64 4, ptr %d, align 4
- ret void
-}
-
-define i64 @caller_large_struct_ret() nounwind {
-; CHECK-LABEL: caller_large_struct_ret:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi.d $sp, $sp, -48
-; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $a0, $sp, 8
-; CHECK-NEXT: bl %plt(callee_large_struct_ret)
-; CHECK-NEXT: ld.d $a0, $sp, 32
-; CHECK-NEXT: ld.d $a1, $sp, 8
-; CHECK-NEXT: add.d $a0, $a1, $a0
-; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 48
-; CHECK-NEXT: ret
- %1 = alloca %struct.large
- call void @callee_large_struct_ret(ptr sret(%struct.large) %1)
- %2 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 0
- %3 = load i64, ptr %2
- %4 = getelementptr inbounds %struct.large, ptr %1, i64 0, i32 3
- %5 = load i64, ptr %4
- %6 = add i64 %3, %5
- ret i64 %6
-}
+;; This file contains specific tests for the lp64d ABI.
;; Check pass floating-point arguments whith FPRs.
@@ -462,26 +63,26 @@ define i64 @caller_double_in_gpr_exhausted_fprs() nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -16
; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_0)
-; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI21_0)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_0)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI3_0)
; CHECK-NEXT: fld.d $fa1, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_1)
-; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI21_1)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_1)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI3_1)
; CHECK-NEXT: fld.d $fa2, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_2)
-; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI21_2)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_2)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI3_2)
; CHECK-NEXT: fld.d $fa3, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_3)
-; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI21_3)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_3)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI3_3)
; CHECK-NEXT: fld.d $fa4, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_4)
-; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI21_4)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_4)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI3_4)
; CHECK-NEXT: fld.d $fa5, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_5)
-; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI21_5)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_5)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI3_5)
; CHECK-NEXT: fld.d $fa6, $a0, 0
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_6)
-; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI21_6)
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI3_6)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(.LCPI3_6)
; CHECK-NEXT: fld.d $fa7, $a0, 0
; CHECK-NEXT: addi.d $a0, $zero, 1
; CHECK-NEXT: movgr2fr.d $fa0, $a0
diff --git a/llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll b/llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll
new file mode 100644
index 0000000000000..d738c066e1ad3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/calling-conv-lp64s.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc --mtriple=loongarch64 --target-abi=lp64s < %s | FileCheck %s
+
+;; This file contains specific tests for the lp64s ABI.
+
+define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
+; CHECK-LABEL: callee_float_in_regs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -16
+; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
+; CHECK-NEXT: move $fp, $a0
+; CHECK-NEXT: bstrpick.d $a0, $a1, 31, 0
+; CHECK-NEXT: bl %plt(__fixsfdi)
+; CHECK-NEXT: add.d $a0, $fp, $a0
+; CHECK-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: ret
+ %b_fptosi = fptosi float %b to i64
+ %1 = add i64 %a, %b_fptosi
+ ret i64 %1
+}
+
+define i64 @caller_float_in_regs() nounwind {
+; CHECK-LABEL: caller_float_in_regs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -16
+; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: lu12i.w $a1, 262144
+; CHECK-NEXT: bl %plt(callee_float_in_regs)
+; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: ret
+ %1 = call i64 @callee_float_in_regs(i64 1, float 2.0)
+ ret i64 %1
+}
+
+define i64 @callee_float_on_stack(i128 %a, i128 %b, i128 %c, i128 %d, float %e) nounwind {
+; CHECK-LABEL: callee_float_on_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld.w $a0, $sp, 0
+; CHECK-NEXT: ret
+ %1 = trunc i128 %d to i64
+ %2 = bitcast float %e to i32
+ %3 = sext i32 %2 to i64
+ %4 = add i64 %1, %3
+ ret i64 %3
+}
+
+define i64 @caller_float_on_stack() nounwind {
+; CHECK-LABEL: caller_float_on_stack:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -16
+; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-NEXT: lu12i.w $a0, 264704
+; CHECK-NEXT: st.d $a0, $sp, 0
+; CHECK-NEXT: ori $a0, $zero, 1
+; CHECK-NEXT: ori $a2, $zero, 2
+; CHECK-NEXT: ori $a4, $zero, 3
+; CHECK-NEXT: ori $a6, $zero, 4
+; CHECK-NEXT: move $a1, $zero
+; CHECK-NEXT: move $a3, $zero
+; CHECK-NEXT: move $a5, $zero
+; CHECK-NEXT: move $a7, $zero
+; CHECK-NEXT: bl %plt(callee_float_on_stack)
+; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: ret
+ %1 = call i64 @callee_float_on_stack(i128 1, i128 2, i128 3, i128 4, float 5.0)
+ ret i64 %1
+}
+
+define float @callee_tiny_scalar_ret() nounwind {
+; CHECK-LABEL: callee_tiny_scalar_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lu12i.w $a0, 260096
+; CHECK-NEXT: ret
+ ret float 1.0
+}
+
+define i64 @caller_tiny_scalar_ret() nounwind {
+; CHECK-LABEL: caller_tiny_scalar_ret:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -16
+; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; CHECK-NEXT: bl %plt(callee_tiny_scalar_ret)
+; CHECK-NEXT: addi.w $a0, $a0, 0
+; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: ret
+ %1 = call float @callee_tiny_scalar_ret()
+ %2 = bitcast float %1 to i32
+ %3 = sext i32 %2 to i64
+ ret i64 %3
+}
diff --git a/llvm/test/CodeGen/LoongArch/e_flags.ll b/llvm/test/CodeGen/LoongArch/e_flags.ll
index a15e4ac5b4393..c004d1f9cdf4d 100644
--- a/llvm/test/CodeGen/LoongArch/e_flags.ll
+++ b/llvm/test/CodeGen/LoongArch/e_flags.ll
@@ -19,7 +19,7 @@
; RUN: llc --mtriple=loongarch64 --filetype=obj %s --target-abi=lp64f -o %t-lp64f
; RUN: llvm-readelf -h %t-lp64f | FileCheck %s --check-prefixes=LP64,ABI-F --match-full-lines
-; RUN: llc --mtriple=loongarch64 --filetype=obj %s --target-abi=lp64d -o %t-lp64d
+; RUN: llc --mtriple=loongarch64 --filetype=obj %s --mattr=+d --target-abi=lp64d -o %t-lp64d
; RUN: llvm-readelf -h %t-lp64d | FileCheck %s --check-prefixes=LP64,ABI-D --match-full-lines
; LP64: Class: ELF64
More information about the llvm-commits
mailing list