[llvm-branch-commits] [llvm] [release/18.x] Backport fixes for ARM64EC thunk generation (PR #92580)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri May 17 10:09:09 PDT 2024
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-aarch64
Author: Daniel Paoliello (dpaoliello)
<details>
<summary>Changes</summary>
Backports #90115 and #92326
Release notes:
Fixes issues where LLVM either generated the incorrect thunk for a function with aligned parameters or didn't correctly pass through the return value when `StructRet` was used.
---
Full diff: https://github.com/llvm/llvm-project/pull/92580.diff
3 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp (+12-4)
- (modified) llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll (+22-14)
- (modified) llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll (+5-5)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
index 55c5bbc66a3f4..862aefe46193d 100644
--- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -181,13 +181,14 @@ void AArch64Arm64ECCallLowering::getThunkArgTypes(
}
for (unsigned E = FT->getNumParams(); I != E; ++I) {
- Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
#if 0
// FIXME: Need more information about argument size; see
// https://reviews.llvm.org/D132926
uint64_t ArgSizeBytes = AttrList.getParamArm64ECArgSizeBytes(I);
+ Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
#else
uint64_t ArgSizeBytes = 0;
+ Align ParamAlign = Align();
#endif
Type *Arm64Ty, *X64Ty;
canonicalizeThunkType(FT->getParamType(I), ParamAlign,
@@ -297,7 +298,7 @@ void AArch64Arm64ECCallLowering::canonicalizeThunkType(
uint64_t TotalSizeBytes = ElementCnt * ElementSizePerBytes;
if (ElementTy->isFloatTy() || ElementTy->isDoubleTy()) {
Out << (ElementTy->isFloatTy() ? "F" : "D") << TotalSizeBytes;
- if (Alignment.value() >= 8 && !T->isPointerTy())
+ if (Alignment.value() >= 16 && !Ret)
Out << "a" << Alignment.value();
Arm64Ty = T;
if (TotalSizeBytes <= 8) {
@@ -328,7 +329,7 @@ void AArch64Arm64ECCallLowering::canonicalizeThunkType(
Out << "m";
if (TypeSize != 4)
Out << TypeSize;
- if (Alignment.value() >= 8 && !T->isPointerTy())
+ if (Alignment.value() >= 16 && !Ret)
Out << "a" << Alignment.value();
// FIXME: Try to canonicalize Arm64Ty more thoroughly?
Arm64Ty = T;
@@ -513,7 +514,14 @@ Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
// Call the function passed to the thunk.
Value *Callee = Thunk->getArg(0);
Callee = IRB.CreateBitCast(Callee, PtrTy);
- Value *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
+ CallInst *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
+
+ auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
+ auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
+ if (SRetAttr.isValid() && !InRegAttr.isValid()) {
+ Thunk->addParamAttr(1, SRetAttr);
+ Call->addParamAttr(0, SRetAttr);
+ }
Value *RetVal = Call;
if (TransformDirectToSRet) {
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
index bb9ba05f7a272..e9556b9d5cbee 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
@@ -222,12 +222,12 @@ define i8 @matches_has_sret() nounwind {
}
%TSRet = type { i64, i64 }
-define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
-; CHECK-LABEL: .def $ientry_thunk$cdecl$m16a32$v;
-; CHECK: .section .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16a32$v
+define void @has_aligned_sret(ptr align 32 sret(%TSRet), i32) nounwind {
+; CHECK-LABEL: .def $ientry_thunk$cdecl$m16$i8;
+; CHECK: .section .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16$i8
; CHECK: // %bb.0:
-; CHECK-NEXT: stp q6, q7, [sp, #-176]! // 32-byte Folded Spill
-; CHECK-NEXT: .seh_save_any_reg_px q6, 176
+; CHECK-NEXT: stp q6, q7, [sp, #-192]! // 32-byte Folded Spill
+; CHECK-NEXT: .seh_save_any_reg_px q6, 192
; CHECK-NEXT: stp q8, q9, [sp, #32] // 32-byte Folded Spill
; CHECK-NEXT: .seh_save_any_reg_p q8, 32
; CHECK-NEXT: stp q10, q11, [sp, #64] // 32-byte Folded Spill
@@ -236,17 +236,25 @@ define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
; CHECK-NEXT: .seh_save_any_reg_p q12, 96
; CHECK-NEXT: stp q14, q15, [sp, #128] // 32-byte Folded Spill
; CHECK-NEXT: .seh_save_any_reg_p q14, 128
-; CHECK-NEXT: stp x29, x30, [sp, #160] // 16-byte Folded Spill
-; CHECK-NEXT: .seh_save_fplr 160
-; CHECK-NEXT: add x29, sp, #160
-; CHECK-NEXT: .seh_add_fp 160
+; CHECK-NEXT: str x19, [sp, #160] // 8-byte Folded Spill
+; CHECK-NEXT: .seh_save_reg x19, 160
+; CHECK-NEXT: stp x29, x30, [sp, #168] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 168
+; CHECK-NEXT: add x29, sp, #168
+; CHECK-NEXT: .seh_add_fp 168
; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: mov x0, x1
; CHECK-NEXT: blr x9
; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_ret
; CHECK-NEXT: ldr x0, [x8, :lo12:__os_arm64x_dispatch_ret]
+; CHECK-NEXT: mov x8, x19
; CHECK-NEXT: .seh_startepilogue
-; CHECK-NEXT: ldp x29, x30, [sp, #160] // 16-byte Folded Reload
-; CHECK-NEXT: .seh_save_fplr 160
+; CHECK-NEXT: ldp x29, x30, [sp, #168] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 168
+; CHECK-NEXT: ldr x19, [sp, #160] // 8-byte Folded Reload
+; CHECK-NEXT: .seh_save_reg x19, 160
; CHECK-NEXT: ldp q14, q15, [sp, #128] // 32-byte Folded Reload
; CHECK-NEXT: .seh_save_any_reg_p q14, 128
; CHECK-NEXT: ldp q12, q13, [sp, #96] // 32-byte Folded Reload
@@ -255,8 +263,8 @@ define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
; CHECK-NEXT: .seh_save_any_reg_p q10, 64
; CHECK-NEXT: ldp q8, q9, [sp, #32] // 32-byte Folded Reload
; CHECK-NEXT: .seh_save_any_reg_p q8, 32
-; CHECK-NEXT: ldp q6, q7, [sp], #176 // 32-byte Folded Reload
-; CHECK-NEXT: .seh_save_any_reg_px q6, 176
+; CHECK-NEXT: ldp q6, q7, [sp], #192 // 32-byte Folded Reload
+; CHECK-NEXT: .seh_save_any_reg_px q6, 192
; CHECK-NEXT: .seh_endepilogue
; CHECK-NEXT: br x0
; CHECK-NEXT: .seh_endfunclet
@@ -457,7 +465,7 @@ define %T2 @simple_struct(%T1 %0, %T2 %1, %T3, %T4) nounwind {
; CHECK-NEXT: .symidx $ientry_thunk$cdecl$i8$v
; CHECK-NEXT: .word 1
; CHECK-NEXT: .symidx "#has_aligned_sret"
-; CHECK-NEXT: .symidx $ientry_thunk$cdecl$m16a32$v
+; CHECK-NEXT: .symidx $ientry_thunk$cdecl$m16$i8
; CHECK-NEXT: .word 1
; CHECK-NEXT: .symidx "#small_array"
; CHECK-NEXT: .symidx $ientry_thunk$cdecl$m2$m2F8
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
index 3b911e78aff2a..7a40fcd85ac58 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-exit-thunks.ll
@@ -236,8 +236,8 @@ declare void @has_sret(ptr sret([100 x i8])) nounwind;
%TSRet = type { i64, i64 }
declare void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind;
-; CHECK-LABEL: .def $iexit_thunk$cdecl$m16a32$v;
-; CHECK: .section .wowthk$aa,"xr",discard,$iexit_thunk$cdecl$m16a32$v
+; CHECK-LABEL: .def $iexit_thunk$cdecl$m16$v;
+; CHECK: .section .wowthk$aa,"xr",discard,$iexit_thunk$cdecl$m16$v
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: .seh_stackalloc 48
@@ -271,8 +271,8 @@ declare void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind;
; CHECK: adrp x11, has_aligned_sret
; CHECK: add x11, x11, :lo12:has_aligned_sret
; CHECK: ldr x9, [x9, :lo12:__os_arm64x_check_icall]
-; CHECK: adrp x10, ($iexit_thunk$cdecl$m16a32$v)
-; CHECK: add x10, x10, :lo12:($iexit_thunk$cdecl$m16a32$v)
+; CHECK: adrp x10, ($iexit_thunk$cdecl$m16$v)
+; CHECK: add x10, x10, :lo12:($iexit_thunk$cdecl$m16$v)
; CHECK: blr x9
; CHECK: .seh_startepilogue
; CHECK: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -492,7 +492,7 @@ declare %T2 @simple_struct(%T1, %T2, %T3, %T4) nounwind;
; CHECK-NEXT: .symidx has_sret
; CHECK-NEXT: .word 0
; CHECK-NEXT: .symidx has_aligned_sret
-; CHECK-NEXT: .symidx $iexit_thunk$cdecl$m16a32$v
+; CHECK-NEXT: .symidx $iexit_thunk$cdecl$m16$v
; CHECK-NEXT: .word 4
; CHECK-NEXT: .symidx "#has_aligned_sret$exit_thunk"
; CHECK-NEXT: .symidx has_aligned_sret
``````````
</details>
https://github.com/llvm/llvm-project/pull/92580
More information about the llvm-branch-commits
mailing list