[llvm] [Arm64EC] Correctly handle sret in entry thunks. (PR #92326)
Eli Friedman via llvm-commits
llvm-commits at lists.llvm.org
Wed May 15 15:53:48 PDT 2024
https://github.com/efriedma-quic created https://github.com/llvm/llvm-project/pull/92326
I accidentally left out the code to transfer sret attributes to entry thunks, so values weren't being passed in the right registers, and the sret pointer wasn't returned in the correct register.
Fixes #90229
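For context, here is a small standalone sketch of the pattern the patch adds. The module, the wrapped function `f`, the hand-built `thunk`, and the `%TSRet` type below are made up for illustration and are not the real thunk builder; only the getParamAttr/addParamAttr propagation (with the thunk index shifted by one, because the thunk's first parameter is the callee pointer) mirrors the actual change in AArch64Arm64ECCallLowering.cpp:

// Standalone illustration (not the real thunk builder): build a wrapped
// function with an aligned sret parameter, a thunk whose first parameter
// is the callee pointer, and then copy the sret attribute the same way
// the patch does.
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("sret-demo", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *I64 = Type::getInt64Ty(Ctx);
  PointerType *PtrTy = PointerType::getUnqual(Ctx);
  StructType *TSRet = StructType::create(Ctx, {I64, I64}, "TSRet");

  // The AArch64 function being wrapped:
  //   void @f(ptr sret(%TSRet) align 32, i32)
  FunctionType *Arm64Ty =
      FunctionType::get(Type::getVoidTy(Ctx), {PtrTy, I32}, false);
  Function *F = Function::Create(Arm64Ty, Function::ExternalLinkage, "f", M);
  F->addParamAttr(0, Attribute::getWithStructRetType(Ctx, TSRet));
  F->addParamAttr(0, Attribute::getWithAlignment(Ctx, Align(32)));

  // A stand-in entry thunk: parameter 0 is the callee, the original
  // parameters follow, so the sret pointer is parameter 1 here.
  FunctionType *ThunkTy =
      FunctionType::get(Type::getVoidTy(Ctx), {PtrTy, PtrTy, I32}, false);
  Function *Thunk =
      Function::Create(ThunkTy, Function::ExternalLinkage, "thunk", M);
  IRBuilder<> IRB(BasicBlock::Create(Ctx, "", Thunk));
  CallInst *Call = IRB.CreateCall(Arm64Ty, Thunk->getArg(0),
                                  {Thunk->getArg(1), Thunk->getArg(2)});
  IRB.CreateRetVoid();

  // The propagation the patch adds: copy sret (unless it is also inreg)
  // from the wrapped function's first parameter onto the thunk parameter
  // and the call argument that carry that pointer.
  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
    Thunk->addParamAttr(1, SRetAttr);
    Call->addParamAttr(0, SRetAttr);
  }

  // The thunk's second parameter and the call argument now carry sret(%TSRet).
  M.print(outs(), nullptr);
  return 0;
}

With the attribute in place, AArch64 call lowering treats the pointer as an sret argument again, which is what the updated CHECK lines in arm64ec-entry-thunks.ll verify: the incoming x64 pointer arrives in x0, is moved into x8 for the AArch64 body, and is stashed in x19 so it can be handed back in x8 after the call.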
From a57cffadad01ef10165b49faab07a92b7ac43417 Mon Sep 17 00:00:00 2001
From: Eli Friedman <efriedma@quicinc.com>
Date: Wed, 15 May 2024 15:50:55 -0700
Subject: [PATCH] [Arm64EC] Correctly handle sret in entry thunks.
I accidentally left out the code to transfer sret attributes to entry
thunks, so values weren't being passed in the right registers, and the
sret pointer wasn't returned in the correct register.
Fixes #90229
---
.../AArch64/AArch64Arm64ECCallLowering.cpp | 9 ++++-
.../CodeGen/AArch64/arm64ec-entry-thunks.ll | 36 +++++++++++--------
2 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
index dddc181b03144..0ec15d34cd4a9 100644
--- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -512,7 +512,14 @@ Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
// Call the function passed to the thunk.
Value *Callee = Thunk->getArg(0);
Callee = IRB.CreateBitCast(Callee, PtrTy);
- Value *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
+ CallInst *Call = IRB.CreateCall(Arm64Ty, Callee, Args);
+
+ auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
+ auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
+ if (SRetAttr.isValid() && !InRegAttr.isValid()) {
+ Thunk->addParamAttr(1, SRetAttr);
+ Call->addParamAttr(0, SRetAttr);
+ }
Value *RetVal = Call;
if (TransformDirectToSRet) {
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
index c00c9bfe127e8..e9556b9d5cbee 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
@@ -222,12 +222,12 @@ define i8 @matches_has_sret() nounwind {
}
%TSRet = type { i64, i64 }
-define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
-; CHECK-LABEL: .def $ientry_thunk$cdecl$m16$v;
-; CHECK: .section .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16$v
+define void @has_aligned_sret(ptr align 32 sret(%TSRet), i32) nounwind {
+; CHECK-LABEL: .def $ientry_thunk$cdecl$m16$i8;
+; CHECK: .section .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$m16$i8
; CHECK: // %bb.0:
-; CHECK-NEXT: stp q6, q7, [sp, #-176]! // 32-byte Folded Spill
-; CHECK-NEXT: .seh_save_any_reg_px q6, 176
+; CHECK-NEXT: stp q6, q7, [sp, #-192]! // 32-byte Folded Spill
+; CHECK-NEXT: .seh_save_any_reg_px q6, 192
; CHECK-NEXT: stp q8, q9, [sp, #32] // 32-byte Folded Spill
; CHECK-NEXT: .seh_save_any_reg_p q8, 32
; CHECK-NEXT: stp q10, q11, [sp, #64] // 32-byte Folded Spill
@@ -236,17 +236,25 @@ define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
; CHECK-NEXT: .seh_save_any_reg_p q12, 96
; CHECK-NEXT: stp q14, q15, [sp, #128] // 32-byte Folded Spill
; CHECK-NEXT: .seh_save_any_reg_p q14, 128
-; CHECK-NEXT: stp x29, x30, [sp, #160] // 16-byte Folded Spill
-; CHECK-NEXT: .seh_save_fplr 160
-; CHECK-NEXT: add x29, sp, #160
-; CHECK-NEXT: .seh_add_fp 160
+; CHECK-NEXT: str x19, [sp, #160] // 8-byte Folded Spill
+; CHECK-NEXT: .seh_save_reg x19, 160
+; CHECK-NEXT: stp x29, x30, [sp, #168] // 16-byte Folded Spill
+; CHECK-NEXT: .seh_save_fplr 168
+; CHECK-NEXT: add x29, sp, #168
+; CHECK-NEXT: .seh_add_fp 168
; CHECK-NEXT: .seh_endprologue
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: mov x0, x1
; CHECK-NEXT: blr x9
; CHECK-NEXT: adrp x8, __os_arm64x_dispatch_ret
; CHECK-NEXT: ldr x0, [x8, :lo12:__os_arm64x_dispatch_ret]
+; CHECK-NEXT: mov x8, x19
; CHECK-NEXT: .seh_startepilogue
-; CHECK-NEXT: ldp x29, x30, [sp, #160] // 16-byte Folded Reload
-; CHECK-NEXT: .seh_save_fplr 160
+; CHECK-NEXT: ldp x29, x30, [sp, #168] // 16-byte Folded Reload
+; CHECK-NEXT: .seh_save_fplr 168
+; CHECK-NEXT: ldr x19, [sp, #160] // 8-byte Folded Reload
+; CHECK-NEXT: .seh_save_reg x19, 160
; CHECK-NEXT: ldp q14, q15, [sp, #128] // 32-byte Folded Reload
; CHECK-NEXT: .seh_save_any_reg_p q14, 128
; CHECK-NEXT: ldp q12, q13, [sp, #96] // 32-byte Folded Reload
@@ -255,8 +263,8 @@ define void @has_aligned_sret(ptr align 32 sret(%TSRet)) nounwind {
; CHECK-NEXT: .seh_save_any_reg_p q10, 64
; CHECK-NEXT: ldp q8, q9, [sp, #32] // 32-byte Folded Reload
; CHECK-NEXT: .seh_save_any_reg_p q8, 32
-; CHECK-NEXT: ldp q6, q7, [sp], #176 // 32-byte Folded Reload
-; CHECK-NEXT: .seh_save_any_reg_px q6, 176
+; CHECK-NEXT: ldp q6, q7, [sp], #192 // 32-byte Folded Reload
+; CHECK-NEXT: .seh_save_any_reg_px q6, 192
; CHECK-NEXT: .seh_endepilogue
; CHECK-NEXT: br x0
; CHECK-NEXT: .seh_endfunclet
@@ -457,7 +465,7 @@ define %T2 @simple_struct(%T1 %0, %T2 %1, %T3, %T4) nounwind {
; CHECK-NEXT: .symidx $ientry_thunk$cdecl$i8$v
; CHECK-NEXT: .word 1
; CHECK-NEXT: .symidx "#has_aligned_sret"
-; CHECK-NEXT: .symidx $ientry_thunk$cdecl$m16$v
+; CHECK-NEXT: .symidx $ientry_thunk$cdecl$m16$i8
; CHECK-NEXT: .word 1
; CHECK-NEXT: .symidx "#small_array"
; CHECK-NEXT: .symidx $ientry_thunk$cdecl$m2$m2F8