[flang-commits] [compiler-rt] [flang] [llvm] [AArch64] fix trampoline implementation: use X15 (PR #126743)
Jameson Nash via flang-commits
flang-commits at lists.llvm.org
Tue Feb 11 07:31:48 PST 2025
https://github.com/vtjnash created https://github.com/llvm/llvm-project/pull/126743
AAPCS64 reserves any of X9-X15 for a compiler to choose to use for this purpose, and says not to use X16 or X18 like GCC (and the previous implementation) chose to use. The X18 register may need to get used by the kernel in some circumstances, as specified by the platform ABI, so it is generally an unwise choice. Simply choosing a different register fixes the problem of this being broken on any platform that actually follows the platform ABI (which is all of them except EABI, if I am reading this Linux kernel bug correctly https://lkml2.uits.iu.edu/hypermail/linux/kernel/2001.2/01502.html). As a side benefit, this also generates slightly better code and avoids needing compiler-rt to be present. I did that by following the XCore implementation instead of PPC (although in hindsight, following the RISCV might have been slightly more readable). That X18 is wrong to use for this purpose has been known for many years (e.g. https://www.mail-archive.com/gcc@gcc.gnu.org/msg76934.html), and it is also known that fixing this to use one of the correct registers is not an ABI break, since this only appears inside of a translation unit. Some of the other temporary registers (e.g. X9) are already reserved inside llvm for internal use as a generic temporary register in the prologue before saving registers, while X15 was already used in rare cases as a scratch register in the prologue as well, so I felt that seemed the most logical choice to choose here.
@ceseo Are there any tests you'd recommend I run to show that this works correctly?
@vchuravy @llvm/issue-subscribers-julialang
>From 546fed81109e575b5b44693c3940e08ea0231ebc Mon Sep 17 00:00:00 2001
From: Jameson Nash <vtjnash at gmail.com>
Date: Mon, 10 Feb 2025 19:21:38 +0000
Subject: [PATCH] [AArch64] fix trampoline implementation: use X15
AAPCS64 reserves any of X9-X15 for this purpose, and says not to use X16
or X18 like GCC did. Simply choosing a different register fixes the
problem of this being broken on any platform that actually follows the
platform ABI. As a side benefit, also generate slightly better code by
following the XCore implementation instead of PPC (although following
the RISCV might have been slightly more readable in hindsight).
---
compiler-rt/lib/builtins/README.txt | 5 -
compiler-rt/lib/builtins/trampoline_setup.c | 42 ---
.../builtins/Unit/trampoline_setup_test.c | 2 +-
.../lib/Optimizer/CodeGen/BoxedProcedure.cpp | 4 +-
.../AArch64/AArch64CallingConvention.td | 36 ++-
.../Target/AArch64/AArch64FrameLowering.cpp | 26 ++
.../Target/AArch64/AArch64ISelLowering.cpp | 85 +++---
llvm/test/CodeGen/AArch64/nest-register.ll | 16 +-
.../CodeGen/AArch64/preserve_nonecc_call.ll | 116 ++++----
.../AArch64/statepoint-call-lowering.ll | 2 +-
llvm/test/CodeGen/AArch64/trampoline.ll | 257 +++++++++++++++++-
llvm/test/CodeGen/AArch64/win64cc-x18.ll | 27 +-
.../CodeGen/AArch64/zero-call-used-regs.ll | 16 +-
13 files changed, 435 insertions(+), 199 deletions(-)
diff --git a/compiler-rt/lib/builtins/README.txt b/compiler-rt/lib/builtins/README.txt
index 19f26c92a0f94f1..2d213d95f333af3 100644
--- a/compiler-rt/lib/builtins/README.txt
+++ b/compiler-rt/lib/builtins/README.txt
@@ -272,11 +272,6 @@ switch32
switch8
switchu8
-// This function generates a custom trampoline function with the specific
-// realFunc and localsPtr values.
-void __trampoline_setup(uint32_t* trampOnStack, int trampSizeAllocated,
- const void* realFunc, void* localsPtr);
-
// There is no C interface to the *_vfp_d8_d15_regs functions. There are
// called in the prolog and epilog of Thumb1 functions. When the C++ ABI use
// SJLJ for exceptions, each function with a catch clause or destructors needs
diff --git a/compiler-rt/lib/builtins/trampoline_setup.c b/compiler-rt/lib/builtins/trampoline_setup.c
index 830e25e4c0303ad..844eb2794414285 100644
--- a/compiler-rt/lib/builtins/trampoline_setup.c
+++ b/compiler-rt/lib/builtins/trampoline_setup.c
@@ -41,45 +41,3 @@ COMPILER_RT_ABI void __trampoline_setup(uint32_t *trampOnStack,
__clear_cache(trampOnStack, &trampOnStack[10]);
}
#endif // __powerpc__ && !defined(__powerpc64__)
-
-// The AArch64 compiler generates calls to __trampoline_setup() when creating
-// trampoline functions on the stack for use with nested functions.
-// This function creates a custom 36-byte trampoline function on the stack
-// which loads x18 with a pointer to the outer function's locals
-// and then jumps to the target nested function.
-// Note: x18 is a reserved platform register on Windows and macOS.
-
-#if defined(__aarch64__) && defined(__ELF__)
-COMPILER_RT_ABI void __trampoline_setup(uint32_t *trampOnStack,
- int trampSizeAllocated,
- const void *realFunc, void *localsPtr) {
- // This should never happen, but if compiler did not allocate
- // enough space on stack for the trampoline, abort.
- if (trampSizeAllocated < 36)
- compilerrt_abort();
-
- // create trampoline
- // Load realFunc into x17. mov/movk 16 bits at a time.
- trampOnStack[0] =
- 0xd2800000u | ((((uint64_t)realFunc >> 0) & 0xffffu) << 5) | 0x11;
- trampOnStack[1] =
- 0xf2a00000u | ((((uint64_t)realFunc >> 16) & 0xffffu) << 5) | 0x11;
- trampOnStack[2] =
- 0xf2c00000u | ((((uint64_t)realFunc >> 32) & 0xffffu) << 5) | 0x11;
- trampOnStack[3] =
- 0xf2e00000u | ((((uint64_t)realFunc >> 48) & 0xffffu) << 5) | 0x11;
- // Load localsPtr into x18
- trampOnStack[4] =
- 0xd2800000u | ((((uint64_t)localsPtr >> 0) & 0xffffu) << 5) | 0x12;
- trampOnStack[5] =
- 0xf2a00000u | ((((uint64_t)localsPtr >> 16) & 0xffffu) << 5) | 0x12;
- trampOnStack[6] =
- 0xf2c00000u | ((((uint64_t)localsPtr >> 32) & 0xffffu) << 5) | 0x12;
- trampOnStack[7] =
- 0xf2e00000u | ((((uint64_t)localsPtr >> 48) & 0xffffu) << 5) | 0x12;
- trampOnStack[8] = 0xd61f0220; // br x17
-
- // Clear instruction cache.
- __clear_cache(trampOnStack, &trampOnStack[9]);
-}
-#endif // defined(__aarch64__) && !defined(__APPLE__) && !defined(_WIN64)
diff --git a/compiler-rt/test/builtins/Unit/trampoline_setup_test.c b/compiler-rt/test/builtins/Unit/trampoline_setup_test.c
index d51d35acaa02f1c..da115fe76427183 100644
--- a/compiler-rt/test/builtins/Unit/trampoline_setup_test.c
+++ b/compiler-rt/test/builtins/Unit/trampoline_setup_test.c
@@ -7,7 +7,7 @@
/*
* Tests nested functions
- * The ppc and aarch64 compilers generates a call to __trampoline_setup
+ * The ppc compiler generates a call to __trampoline_setup
* The i386 and x86_64 compilers generate a call to ___enable_execute_stack
*/
diff --git a/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp b/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
index 26f4aee21d8bda1..f402404121da08e 100644
--- a/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
+++ b/flang/lib/Optimizer/CodeGen/BoxedProcedure.cpp
@@ -274,10 +274,10 @@ class BoxedProcedurePass
auto loc = embox.getLoc();
mlir::Type i8Ty = builder.getI8Type();
mlir::Type i8Ptr = builder.getRefType(i8Ty);
- // For AArch64, PPC32 and PPC64, the thunk is populated by a call to
+ // For PPC32 and PPC64, the thunk is populated by a call to
// __trampoline_setup, which is defined in
// compiler-rt/lib/builtins/trampoline_setup.c and requires the
- // thunk size greater than 32 bytes. For RISCV and x86_64, the
+ // thunk size greater than 32 bytes. For AArch64, RISCV and x86_64, the
// thunk setup doesn't go through __trampoline_setup and fits in 32
// bytes.
fir::SequenceType::Extent thunkSize = triple.getTrampolineSize();
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.td b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
index 7cca6d9bc6b9c32..8355463dea94ea9 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
@@ -28,6 +28,12 @@ class CCIfSubtarget<string F, CCAction A>
//===----------------------------------------------------------------------===//
defvar AArch64_Common = [
+ // The 'nest' parameter, if any, is passed in X15.
+ // The previous register used here (X18) is also defined to be unavailable
+ // for this purpose, while all of X9-X15 were defined to be free for LLVM to
+ // use for this, so use X15 (which LLVM often already clobbers anyways).
+ CCIfNest<CCAssignToReg<[X15]>>,
+
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
@@ -117,16 +123,12 @@ defvar AArch64_Common = [
];
let Entry = 1 in
-def CC_AArch64_AAPCS : CallingConv<!listconcat(
- // The 'nest' parameter, if any, is passed in X18.
- // Darwin and Windows use X18 as the platform register and hence 'nest' isn't
- // currently supported there.
- [CCIfNest<CCAssignToReg<[X18]>>],
- AArch64_Common
-)>;
+def CC_AArch64_AAPCS : CallingConv<AArch64_Common>;
let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
+ CCIfNest<CCAssignToReg<[X15]>>,
+
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
@@ -177,6 +179,8 @@ def CC_AArch64_Win64_VarArg : CallingConv<[
// a stack layout compatible with the x64 calling convention.
let Entry = 1 in
def CC_AArch64_Arm64EC_VarArg : CallingConv<[
+ CCIfNest<CCAssignToReg<[X15]>>,
+
// Convert small floating-point values to integer.
CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
CCIfType<[f32], CCBitConvertToType<i32>>,
@@ -295,6 +299,8 @@ def CC_AArch64_Arm64EC_Thunk_Native : CallingConv<[
let Entry = 1 in
def RetCC_AArch64_Arm64EC_Thunk : CallingConv<[
+ CCIfNest<CCAssignToReg<[X15]>>,
+
// The X86-Win64 calling convention always returns __m64 values in RAX.
CCIfType<[x86mmx], CCBitConvertToType<i64>>,
@@ -353,6 +359,8 @@ def RetCC_AArch64_Arm64EC_CFGuard_Check : CallingConv<[
// + Stack slots are sized as needed rather than being at least 64-bit.
let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
+ CCIfNest<CCAssignToReg<[X15]>>,
+
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
@@ -427,6 +435,8 @@ def CC_AArch64_DarwinPCS : CallingConv<[
let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
+ CCIfNest<CCAssignToReg<[X15]>>,
+
CCIfType<[iPTR], CCBitConvertToType<i64>>,
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
@@ -450,6 +460,8 @@ def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
// same as the normal Darwin VarArgs handling.
let Entry = 1 in
def CC_AArch64_DarwinPCS_ILP32_VarArg : CallingConv<[
+ CCIfNest<CCAssignToReg<[X15]>>,
+
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,
@@ -494,6 +506,8 @@ def CC_AArch64_DarwinPCS_ILP32_VarArg : CallingConv<[
let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
+ CCIfNest<CCAssignToReg<[X15]>>,
+
CCIfType<[iPTR], CCBitConvertToType<i64>>,
// Handle all vector types as either f64 or v2f64.
@@ -523,6 +537,7 @@ def CC_AArch64_Preserve_None : CallingConv<[
// We can pass arguments in all general registers, except:
// - X8, used for sret
// - X16/X17, used by the linker as IP0/IP1
+ // - X15, the nest register and used by Windows for stack allocation
// - X18, the platform register
// - X19, the base pointer
// - X29, the frame pointer
@@ -533,6 +548,7 @@ def CC_AArch64_Preserve_None : CallingConv<[
// normal functions without saving and reloading arguments.
// X9 is assigned last as it is used in FrameLowering as the first
// choice for a scratch register.
+ CCIfNest<CCAssignToReg<[X15]>>,
CCIfType<[i32], CCAssignToReg<[W20, W21, W22, W23,
W24, W25, W26, W27, W28,
W0, W1, W2, W3, W4, W5,
@@ -544,12 +560,6 @@ def CC_AArch64_Preserve_None : CallingConv<[
X6, X7, X10, X11,
X12, X13, X14, X9]>>,
- // Windows uses X15 for stack allocation
- CCIf<"!State.getMachineFunction().getSubtarget<AArch64Subtarget>().isTargetWindows()",
- CCIfType<[i32], CCAssignToReg<[W15]>>>,
- CCIf<"!State.getMachineFunction().getSubtarget<AArch64Subtarget>().isTargetWindows()",
- CCIfType<[i64], CCAssignToReg<[X15]>>>,
-
CCDelegateTo<CC_AArch64_AAPCS>
]>;
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index d3abd79b85a75f7..ced3ff7b742ad14 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2044,6 +2044,25 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
: 0;
if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
+ // Find an available register in which to preserve the original value of X15.
+ unsigned X15Scratch = AArch64::NoRegister;
+ if (LiveRegs.contains(AArch64::X15)) {
+ // if (llvm::any_of(
+ // MBB.liveins(),
+ // [&STI](const MachineBasicBlock::RegisterMaskPair &LiveIn) {
+ // return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
+ // AArch64::X15, LiveIn.PhysReg);
+ // }))
+ X15Scratch = findScratchNonCalleeSaveRegister(&MBB);
+ assert(X15Scratch != AArch64::NoRegister);
+ LiveRegs.removeReg(AArch64::X15); // ignore X15 since we restore it
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrr), X15Scratch)
+ .addReg(AArch64::XZR)
+ .addReg(AArch64::X15, RegState::Undef)
+ .addReg(AArch64::X15, RegState::Implicit)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
if (NeedsWinCFI) {
HasWinCFI = true;
@@ -2166,6 +2185,13 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// we've set a frame pointer and already finished the SEH prologue.
assert(!NeedsWinCFI);
}
+ if (X15Scratch != AArch64::NoRegister) {
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrr), AArch64::X15)
+ .addReg(AArch64::XZR)
+ .addReg(X15Scratch, RegState::Undef)
+ .addReg(X15Scratch, RegState::Implicit)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
}
StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 0d1608a97bfd300..1404077446420db 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7290,59 +7290,66 @@ static SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) {
SDValue AArch64TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const {
- // Note: x18 cannot be used for the Nest parameter on Windows and macOS.
- if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
- report_fatal_error(
- "ADJUST_TRAMPOLINE operation is only supported on Linux.");
-
return Op.getOperand(0);
}
SDValue AArch64TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const {
-
- // Note: x18 cannot be used for the Nest parameter on Windows and macOS.
- if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
- report_fatal_error("INIT_TRAMPOLINE operation is only supported on Linux.");
-
SDValue Chain = Op.getOperand(0);
- SDValue Trmp = Op.getOperand(1); // trampoline
+ SDValue Trmp = Op.getOperand(1); // trampoline, 36 bytes
SDValue FPtr = Op.getOperand(2); // nested function
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
- SDLoc dl(Op);
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
-
- TargetLowering::ArgListTy Args;
- TargetLowering::ArgListEntry Entry;
+ const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
- Entry.Ty = IntPtrTy;
- Entry.Node = Trmp;
- Args.push_back(Entry);
+ // ldr x15, .+16
+ // ldr x17, .+20
+ // br x17
+ // 0
+ // .nest: .qword nest
+ // .fptr: .qword fptr
+ SDValue OutChains[5];
- if (auto *FI = dyn_cast<FrameIndexSDNode>(Trmp.getNode())) {
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo &MFI = MF.getFrameInfo();
- Entry.Node =
- DAG.getConstant(MFI.getObjectSize(FI->getIndex()), dl, MVT::i64);
- } else
- Entry.Node = DAG.getConstant(36, dl, MVT::i64);
+ const char X15 = 0x0f;
+ const char X17 = 0x11;
- Args.push_back(Entry);
- Entry.Node = FPtr;
- Args.push_back(Entry);
- Entry.Node = Nest;
- Args.push_back(Entry);
+ SDValue Addr = Trmp;
- // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
- TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
- CallingConv::C, Type::getVoidTy(*DAG.getContext()),
- DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
+ SDLoc dl(Op);
+ OutChains[0] =
+ DAG.getStore(Chain, dl, DAG.getConstant(0x58000080u | X15, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr));
- std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
- return CallResult.second;
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
+ DAG.getConstant(4, dl, MVT::i64));
+ OutChains[1] =
+ DAG.getStore(Chain, dl, DAG.getConstant(0x580000b0u | X17, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr, 4));
+
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
+ DAG.getConstant(8, dl, MVT::i64));
+ OutChains[2] =
+ DAG.getStore(Chain, dl, DAG.getConstant(0xd61f0220u, dl, MVT::i32), Addr,
+ MachinePointerInfo(TrmpAddr, 8));
+
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
+ DAG.getConstant(16, dl, MVT::i64));
+ OutChains[3] =
+ DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 16));
+
+ Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
+ DAG.getConstant(24, dl, MVT::i64));
+ OutChains[4] =
+ DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 24));
+
+ SDValue StoreToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
+
+ SDValue EndOfTrmp = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
+ DAG.getConstant(12, dl, MVT::i64));
+
+ // Call clear cache on the trampoline instructions.
+ return DAG.getNode(ISD::CLEAR_CACHE, dl, MVT::Other, StoreToken,
+ Trmp, EndOfTrmp);
}
SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
diff --git a/llvm/test/CodeGen/AArch64/nest-register.ll b/llvm/test/CodeGen/AArch64/nest-register.ll
index 1e1c1b044bab653..2e94dfba1fa523e 100644
--- a/llvm/test/CodeGen/AArch64/nest-register.ll
+++ b/llvm/test/CodeGen/AArch64/nest-register.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -disable-post-ra -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; Tests that the 'nest' parameter attribute causes the relevant parameter to be
@@ -5,18 +6,21 @@
define ptr @nest_receiver(ptr nest %arg) nounwind {
; CHECK-LABEL: nest_receiver:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: mov x0, x18
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, x15
+; CHECK-NEXT: ret
ret ptr %arg
}
define ptr @nest_caller(ptr %arg) nounwind {
; CHECK-LABEL: nest_caller:
-; CHECK: mov x18, x0
-; CHECK-NEXT: bl nest_receiver
-; CHECK: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: mov x15, x0
+; CHECK-NEXT: bl nest_receiver
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
%result = call ptr @nest_receiver(ptr nest %arg)
ret ptr %result
diff --git a/llvm/test/CodeGen/AArch64/preserve_nonecc_call.ll b/llvm/test/CodeGen/AArch64/preserve_nonecc_call.ll
index 9b9717c19321e75..e0d7b5abe7bea25 100644
--- a/llvm/test/CodeGen/AArch64/preserve_nonecc_call.ll
+++ b/llvm/test/CodeGen/AArch64/preserve_nonecc_call.ll
@@ -184,10 +184,11 @@ declare preserve_nonecc i64 @callee_with_many_param2(i64 %a1, i64 %a2, i64 %a3,
define preserve_nonecc i64 @callee_with_many_param(i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, i64 %a7, i64 %a8, i64 %a9, i64 %a10, i64 %a11, i64 %a12, i64 %a13, i64 %a14, i64 %a15, i64 %a16, i64 %a17, i64 %a18, i64 %a19, i64 %a20, i64 %a21, i64 %a22, i64 %a23, i64 %a24) {
; CHECK-LABEL: callee_with_many_param:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov x8, x15
+; CHECK-NEXT: ldr x8, [sp, #32]
; CHECK-NEXT: mov x15, x20
; CHECK-NEXT: mov x20, x21
; CHECK-NEXT: mov x21, x22
@@ -212,17 +213,20 @@ define preserve_nonecc i64 @callee_with_many_param(i64 %a1, i64 %a2, i64 %a3, i6
; CHECK-NEXT: mov x13, x14
; CHECK-NEXT: mov x14, x9
; CHECK-NEXT: mov x9, x8
+; CHECK-NEXT: str x15, [sp]
; CHECK-NEXT: bl callee_with_many_param2
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
;
; DARWIN-LABEL: callee_with_many_param:
; DARWIN: ; %bb.0:
-; DARWIN-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; DARWIN-NEXT: .cfi_def_cfa_offset 16
+; DARWIN-NEXT: sub sp, sp, #32
+; DARWIN-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; DARWIN-NEXT: .cfi_def_cfa_offset 32
; DARWIN-NEXT: .cfi_offset w30, -8
; DARWIN-NEXT: .cfi_offset w29, -16
-; DARWIN-NEXT: mov x8, x15
+; DARWIN-NEXT: ldr x8, [sp, #32]
; DARWIN-NEXT: mov x15, x20
; DARWIN-NEXT: mov x20, x21
; DARWIN-NEXT: mov x21, x22
@@ -247,8 +251,10 @@ define preserve_nonecc i64 @callee_with_many_param(i64 %a1, i64 %a2, i64 %a3, i6
; DARWIN-NEXT: mov x13, x14
; DARWIN-NEXT: mov x14, x9
; DARWIN-NEXT: mov x9, x8
+; DARWIN-NEXT: str x15, [sp]
; DARWIN-NEXT: bl _callee_with_many_param2
-; DARWIN-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; DARWIN-NEXT: add sp, sp, #32
; DARWIN-NEXT: ret
;
; WIN-LABEL: callee_with_many_param:
@@ -302,17 +308,18 @@ define preserve_nonecc i64 @callee_with_many_param(i64 %a1, i64 %a2, i64 %a3, i6
define i64 @caller3() {
; CHECK-LABEL: caller3:
; CHECK: // %bb.0:
-; CHECK-NEXT: stp d15, d14, [sp, #-160]! // 16-byte Folded Spill
-; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
-; CHECK-NEXT: stp x28, x27, [sp, #80] // 16-byte Folded Spill
-; CHECK-NEXT: stp x26, x25, [sp, #96] // 16-byte Folded Spill
-; CHECK-NEXT: stp x24, x23, [sp, #112] // 16-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #128] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #144] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 160
+; CHECK-NEXT: sub sp, sp, #176
+; CHECK-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT: stp x28, x27, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT: stp x26, x25, [sp, #112] // 16-byte Folded Spill
+; CHECK-NEXT: stp x24, x23, [sp, #128] // 16-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #144] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #160] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 176
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w20, -16
; CHECK-NEXT: .cfi_offset w21, -24
@@ -332,6 +339,7 @@ define i64 @caller3() {
; CHECK-NEXT: .cfi_offset b13, -144
; CHECK-NEXT: .cfi_offset b14, -152
; CHECK-NEXT: .cfi_offset b15, -160
+; CHECK-NEXT: mov w8, #24 // =0x18
; CHECK-NEXT: mov w20, #1 // =0x1
; CHECK-NEXT: mov w21, #2 // =0x2
; CHECK-NEXT: mov w22, #3 // =0x3
@@ -355,33 +363,35 @@ define i64 @caller3() {
; CHECK-NEXT: mov w13, #21 // =0x15
; CHECK-NEXT: mov w14, #22 // =0x16
; CHECK-NEXT: mov w9, #23 // =0x17
-; CHECK-NEXT: mov w15, #24 // =0x18
+; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl callee_with_many_param
-; CHECK-NEXT: ldp x20, x19, [sp, #144] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
-; CHECK-NEXT: ldp x22, x21, [sp, #128] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x24, x23, [sp, #112] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x26, x25, [sp, #96] // 16-byte Folded Reload
-; CHECK-NEXT: ldp x28, x27, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldp d15, d14, [sp], #160 // 16-byte Folded Reload
+; CHECK-NEXT: ldp x20, x19, [sp, #160] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #144] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x24, x23, [sp, #128] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x26, x25, [sp, #112] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x28, x27, [sp, #96] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #176
; CHECK-NEXT: ret
;
; DARWIN-LABEL: caller3:
; DARWIN: ; %bb.0:
-; DARWIN-NEXT: stp d15, d14, [sp, #-160]! ; 16-byte Folded Spill
-; DARWIN-NEXT: stp d13, d12, [sp, #16] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp d11, d10, [sp, #32] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp d9, d8, [sp, #48] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp x28, x27, [sp, #64] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp x26, x25, [sp, #80] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp x24, x23, [sp, #96] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp x22, x21, [sp, #112] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp x20, x19, [sp, #128] ; 16-byte Folded Spill
-; DARWIN-NEXT: stp x29, x30, [sp, #144] ; 16-byte Folded Spill
-; DARWIN-NEXT: .cfi_def_cfa_offset 160
+; DARWIN-NEXT: sub sp, sp, #176
+; DARWIN-NEXT: stp d15, d14, [sp, #16] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp d13, d12, [sp, #32] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp d11, d10, [sp, #48] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp d9, d8, [sp, #64] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp x28, x27, [sp, #80] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp x26, x25, [sp, #96] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp x24, x23, [sp, #112] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp x22, x21, [sp, #128] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp x20, x19, [sp, #144] ; 16-byte Folded Spill
+; DARWIN-NEXT: stp x29, x30, [sp, #160] ; 16-byte Folded Spill
+; DARWIN-NEXT: .cfi_def_cfa_offset 176
; DARWIN-NEXT: .cfi_offset w30, -8
; DARWIN-NEXT: .cfi_offset w29, -16
; DARWIN-NEXT: .cfi_offset w19, -24
@@ -402,6 +412,7 @@ define i64 @caller3() {
; DARWIN-NEXT: .cfi_offset b13, -144
; DARWIN-NEXT: .cfi_offset b14, -152
; DARWIN-NEXT: .cfi_offset b15, -160
+; DARWIN-NEXT: mov w8, #24 ; =0x18
; DARWIN-NEXT: mov w20, #1 ; =0x1
; DARWIN-NEXT: mov w21, #2 ; =0x2
; DARWIN-NEXT: mov w22, #3 ; =0x3
@@ -425,18 +436,19 @@ define i64 @caller3() {
; DARWIN-NEXT: mov w13, #21 ; =0x15
; DARWIN-NEXT: mov w14, #22 ; =0x16
; DARWIN-NEXT: mov w9, #23 ; =0x17
-; DARWIN-NEXT: mov w15, #24 ; =0x18
+; DARWIN-NEXT: str x8, [sp]
; DARWIN-NEXT: bl _callee_with_many_param
-; DARWIN-NEXT: ldp x29, x30, [sp, #144] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp x20, x19, [sp, #128] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp x22, x21, [sp, #112] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp x24, x23, [sp, #96] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp x26, x25, [sp, #80] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp x28, x27, [sp, #64] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp d9, d8, [sp, #48] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp d11, d10, [sp, #32] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp d13, d12, [sp, #16] ; 16-byte Folded Reload
-; DARWIN-NEXT: ldp d15, d14, [sp], #160 ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp x24, x23, [sp, #112] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp x26, x25, [sp, #96] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp x28, x27, [sp, #80] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp d9, d8, [sp, #64] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp d11, d10, [sp, #48] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp d13, d12, [sp, #32] ; 16-byte Folded Reload
+; DARWIN-NEXT: ldp d15, d14, [sp, #16] ; 16-byte Folded Reload
+; DARWIN-NEXT: add sp, sp, #176
; DARWIN-NEXT: ret
;
; WIN-LABEL: caller3:
diff --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
index 9619895c450cac3..32c3eaeb9c8766c 100644
--- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
@@ -207,7 +207,7 @@ define void @test_attributes(ptr byval(%struct2) %s) gc "statepoint-example" {
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: ldr x8, [sp, #64]
; CHECK-NEXT: ldr q0, [sp, #48]
-; CHECK-NEXT: mov x18, xzr
+; CHECK-NEXT: mov x15, xzr
; CHECK-NEXT: mov w0, #42 // =0x2a
; CHECK-NEXT: mov w1, #17 // =0x11
; CHECK-NEXT: str x8, [sp, #16]
diff --git a/llvm/test/CodeGen/AArch64/trampoline.ll b/llvm/test/CodeGen/AArch64/trampoline.ll
index 30ac2aa283b3eec..0e682704afbf87d 100644
--- a/llvm/test/CodeGen/AArch64/trampoline.ll
+++ b/llvm/test/CodeGen/AArch64/trampoline.ll
@@ -1,32 +1,265 @@
-; RUN: llc -mtriple=aarch64-- < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK-LINUX
+; RUN: llc -mtriple=aarch64-none-eabi < %s | FileCheck %s --check-prefixes=CHECK-LINUX
+; RUN: llc -mtriple=aarch64-pc-windows-msvc < %s | FileCheck %s --check-prefix=CHECK-PC
+; RUN: llc -mtriple=aarch64-apple-darwin < %s | FileCheck %s --check-prefixes=CHECK-APPLE
@trampg = internal global [36 x i8] zeroinitializer, align 8
declare void @llvm.init.trampoline(ptr, ptr, ptr);
declare ptr @llvm.adjust.trampoline(ptr);
-define i64 @f(ptr nest %c, i64 %x, i64 %y) {
- %sum = add i64 %x, %y
- ret i64 %sum
+define ptr @f(ptr nest %x, i64 %y) {
+; CHECK-LINUX-LABEL: f:
+; CHECK-LINUX: // %bb.0:
+; CHECK-LINUX-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-LINUX-NEXT: sub sp, sp, #237, lsl #12 // =970752
+; CHECK-LINUX-NEXT: sub sp, sp, #3264
+; CHECK-LINUX-NEXT: .cfi_def_cfa_offset 974032
+; CHECK-LINUX-NEXT: .cfi_offset w29, -16
+; CHECK-LINUX-NEXT: add x0, x15, x0
+; CHECK-LINUX-NEXT: add sp, sp, #237, lsl #12 // =970752
+; CHECK-LINUX-NEXT: add sp, sp, #3264
+; CHECK-LINUX-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-LINUX-NEXT: ret
+;
+; CHECK-PC-LABEL: f:
+; CHECK-PC: .seh_proc f
+; CHECK-PC-NEXT: // %bb.0:
+; CHECK-PC-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-PC-NEXT: .seh_save_fplr_x 16
+; CHECK-PC-NEXT: mov x9, x15
+; CHECK-PC-NEXT: mov x15, #60876 // =0xedcc
+; CHECK-PC-NEXT: .seh_nop
+; CHECK-PC-NEXT: bl __chkstk
+; CHECK-PC-NEXT: .seh_nop
+; CHECK-PC-NEXT: sub sp, sp, x15, lsl #4
+; CHECK-PC-NEXT: .seh_stackalloc 974016
+; CHECK-PC-NEXT: mov x15, x9
+; CHECK-PC-NEXT: .seh_endprologue
+; CHECK-PC-NEXT: add x0, x15, x0
+; CHECK-PC-NEXT: .seh_startepilogue
+; CHECK-PC-NEXT: add sp, sp, #237, lsl #12 // =970752
+; CHECK-PC-NEXT: .seh_stackalloc 970752
+; CHECK-PC-NEXT: add sp, sp, #3264
+; CHECK-PC-NEXT: .seh_stackalloc 3264
+; CHECK-PC-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-PC-NEXT: .seh_save_fplr_x 16
+; CHECK-PC-NEXT: .seh_endepilogue
+; CHECK-PC-NEXT: ret
+; CHECK-PC-NEXT: .seh_endfunclet
+; CHECK-PC-NEXT: .seh_endproc
+;
+; CHECK-APPLE-LABEL: f:
+; CHECK-APPLE: ; %bb.0:
+; CHECK-APPLE-NEXT: stp x28, x27, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-APPLE-NEXT: sub sp, sp, #237, lsl #12 ; =970752
+; CHECK-APPLE-NEXT: sub sp, sp, #3264
+; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 974032
+; CHECK-APPLE-NEXT: .cfi_offset w27, -8
+; CHECK-APPLE-NEXT: .cfi_offset w28, -16
+; CHECK-APPLE-NEXT: add x0, x15, x0
+; CHECK-APPLE-NEXT: add sp, sp, #237, lsl #12 ; =970752
+; CHECK-APPLE-NEXT: add sp, sp, #3264
+; CHECK-APPLE-NEXT: ldp x28, x27, [sp], #16 ; 16-byte Folded Reload
+; CHECK-APPLE-NEXT: ret
+ %chkstack = alloca [u0xedcba x i8]
+ %sum = getelementptr i8, ptr %x, i64 %y
+ ret ptr %sum
}
define i64 @func1() {
+; CHECK-LINUX-LABEL: func1:
+; CHECK-LINUX: // %bb.0:
+; CHECK-LINUX-NEXT: sub sp, sp, #64
+; CHECK-LINUX-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-LINUX-NEXT: .cfi_def_cfa_offset 64
+; CHECK-LINUX-NEXT: .cfi_offset w30, -16
+; CHECK-LINUX-NEXT: adrp x8, :got:f
+; CHECK-LINUX-NEXT: mov w9, #544 // =0x220
+; CHECK-LINUX-NEXT: add x0, sp, #8
+; CHECK-LINUX-NEXT: ldr x8, [x8, :got_lo12:f]
+; CHECK-LINUX-NEXT: movk w9, #54815, lsl #16
+; CHECK-LINUX-NEXT: str w9, [sp, #16]
+; CHECK-LINUX-NEXT: add x9, sp, #56
+; CHECK-LINUX-NEXT: stp x9, x8, [sp, #24]
+; CHECK-LINUX-NEXT: mov x8, #143 // =0x8f
+; CHECK-LINUX-NEXT: movk x8, #22528, lsl #16
+; CHECK-LINUX-NEXT: movk x8, #177, lsl #32
+; CHECK-LINUX-NEXT: movk x8, #22528, lsl #48
+; CHECK-LINUX-NEXT: str x8, [sp, #8]
+; CHECK-LINUX-NEXT: add x8, sp, #8
+; CHECK-LINUX-NEXT: add x1, x8, #12
+; CHECK-LINUX-NEXT: bl __clear_cache
+; CHECK-LINUX-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-LINUX-NEXT: mov x0, xzr
+; CHECK-LINUX-NEXT: add sp, sp, #64
+; CHECK-LINUX-NEXT: ret
+;
+; CHECK-PC-LABEL: func1:
+; CHECK-PC: .seh_proc func1
+; CHECK-PC-NEXT: // %bb.0:
+; CHECK-PC-NEXT: sub sp, sp, #64
+; CHECK-PC-NEXT: .seh_stackalloc 64
+; CHECK-PC-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-PC-NEXT: .seh_save_reg x30, 48
+; CHECK-PC-NEXT: .seh_endprologue
+; CHECK-PC-NEXT: adrp x8, f
+; CHECK-PC-NEXT: add x8, x8, :lo12:f
+; CHECK-PC-NEXT: add x9, sp, #56
+; CHECK-PC-NEXT: stp x9, x8, [sp, #24]
+; CHECK-PC-NEXT: mov w8, #544 // =0x220
+; CHECK-PC-NEXT: add x0, sp, #8
+; CHECK-PC-NEXT: movk w8, #54815, lsl #16
+; CHECK-PC-NEXT: str w8, [sp, #16]
+; CHECK-PC-NEXT: mov x8, #143 // =0x8f
+; CHECK-PC-NEXT: movk x8, #22528, lsl #16
+; CHECK-PC-NEXT: movk x8, #177, lsl #32
+; CHECK-PC-NEXT: movk x8, #22528, lsl #48
+; CHECK-PC-NEXT: str x8, [sp, #8]
+; CHECK-PC-NEXT: add x8, sp, #8
+; CHECK-PC-NEXT: add x1, x8, #12
+; CHECK-PC-NEXT: bl __clear_cache
+; CHECK-PC-NEXT: mov x0, xzr
+; CHECK-PC-NEXT: .seh_startepilogue
+; CHECK-PC-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-PC-NEXT: .seh_save_reg x30, 48
+; CHECK-PC-NEXT: add sp, sp, #64
+; CHECK-PC-NEXT: .seh_stackalloc 64
+; CHECK-PC-NEXT: .seh_endepilogue
+; CHECK-PC-NEXT: ret
+; CHECK-PC-NEXT: .seh_endfunclet
+; CHECK-PC-NEXT: .seh_endproc
+;
+; CHECK-APPLE-LABEL: func1:
+; CHECK-APPLE: ; %bb.0:
+; CHECK-APPLE-NEXT: sub sp, sp, #64
+; CHECK-APPLE-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 64
+; CHECK-APPLE-NEXT: .cfi_offset w30, -8
+; CHECK-APPLE-NEXT: .cfi_offset w29, -16
+; CHECK-APPLE-NEXT: Lloh0:
+; CHECK-APPLE-NEXT: adrp x8, _f at PAGE
+; CHECK-APPLE-NEXT: Lloh1:
+; CHECK-APPLE-NEXT: add x8, x8, _f at PAGEOFF
+; CHECK-APPLE-NEXT: add x9, sp, #40
+; CHECK-APPLE-NEXT: stp x9, x8, [sp, #16]
+; CHECK-APPLE-NEXT: mov w8, #544 ; =0x220
+; CHECK-APPLE-NEXT: mov x0, sp
+; CHECK-APPLE-NEXT: movk w8, #54815, lsl #16
+; CHECK-APPLE-NEXT: str w8, [sp, #8]
+; CHECK-APPLE-NEXT: mov x8, #143 ; =0x8f
+; CHECK-APPLE-NEXT: movk x8, #22528, lsl #16
+; CHECK-APPLE-NEXT: movk x8, #177, lsl #32
+; CHECK-APPLE-NEXT: movk x8, #22528, lsl #48
+; CHECK-APPLE-NEXT: str x8, [sp]
+; CHECK-APPLE-NEXT: mov x8, sp
+; CHECK-APPLE-NEXT: add x1, x8, #12
+; CHECK-APPLE-NEXT: bl ___clear_cache
+; CHECK-APPLE-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
+; CHECK-APPLE-NEXT: mov x0, xzr
+; CHECK-APPLE-NEXT: add sp, sp, #64
+; CHECK-APPLE-NEXT: ret
+; CHECK-APPLE-NEXT: .loh AdrpAdd Lloh0, Lloh1
%val = alloca i64
- %nval = bitcast ptr %val to ptr
%tramp = alloca [36 x i8], align 8
- ; CHECK: mov w1, #36
- ; CHECK: bl __trampoline_setup
- call void @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %nval)
+ call void @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %val)
%fp = call ptr @llvm.adjust.trampoline(ptr %tramp)
ret i64 0
}
define i64 @func2() {
+; CHECK-LINUX-LABEL: func2:
+; CHECK-LINUX: // %bb.0:
+; CHECK-LINUX-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-LINUX-NEXT: .cfi_def_cfa_offset 16
+; CHECK-LINUX-NEXT: .cfi_offset w30, -16
+; CHECK-LINUX-NEXT: adrp x8, :got:f
+; CHECK-LINUX-NEXT: mov w9, #544 // =0x220
+; CHECK-LINUX-NEXT: adrp x0, trampg
+; CHECK-LINUX-NEXT: add x0, x0, :lo12:trampg
+; CHECK-LINUX-NEXT: ldr x8, [x8, :got_lo12:f]
+; CHECK-LINUX-NEXT: movk w9, #54815, lsl #16
+; CHECK-LINUX-NEXT: str w9, [x0, #8]
+; CHECK-LINUX-NEXT: add x9, sp, #8
+; CHECK-LINUX-NEXT: add x1, x0, #12
+; CHECK-LINUX-NEXT: stp x9, x8, [x0, #16]
+; CHECK-LINUX-NEXT: mov x8, #143 // =0x8f
+; CHECK-LINUX-NEXT: movk x8, #22528, lsl #16
+; CHECK-LINUX-NEXT: movk x8, #177, lsl #32
+; CHECK-LINUX-NEXT: movk x8, #22528, lsl #48
+; CHECK-LINUX-NEXT: str x8, [x0]
+; CHECK-LINUX-NEXT: bl __clear_cache
+; CHECK-LINUX-NEXT: mov x0, xzr
+; CHECK-LINUX-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-LINUX-NEXT: ret
+;
+; CHECK-PC-LABEL: func2:
+; CHECK-PC: .seh_proc func2
+; CHECK-PC-NEXT: // %bb.0:
+; CHECK-PC-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-PC-NEXT: .seh_save_reg_x x30, 16
+; CHECK-PC-NEXT: .seh_endprologue
+; CHECK-PC-NEXT: adrp x0, trampg
+; CHECK-PC-NEXT: add x0, x0, :lo12:trampg
+; CHECK-PC-NEXT: adrp x8, f
+; CHECK-PC-NEXT: add x8, x8, :lo12:f
+; CHECK-PC-NEXT: add x9, sp, #8
+; CHECK-PC-NEXT: add x1, x0, #12
+; CHECK-PC-NEXT: stp x9, x8, [x0, #16]
+; CHECK-PC-NEXT: mov w8, #544 // =0x220
+; CHECK-PC-NEXT: movk w8, #54815, lsl #16
+; CHECK-PC-NEXT: str w8, [x0, #8]
+; CHECK-PC-NEXT: mov x8, #143 // =0x8f
+; CHECK-PC-NEXT: movk x8, #22528, lsl #16
+; CHECK-PC-NEXT: movk x8, #177, lsl #32
+; CHECK-PC-NEXT: movk x8, #22528, lsl #48
+; CHECK-PC-NEXT: str x8, [x0]
+; CHECK-PC-NEXT: bl __clear_cache
+; CHECK-PC-NEXT: mov x0, xzr
+; CHECK-PC-NEXT: .seh_startepilogue
+; CHECK-PC-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-PC-NEXT: .seh_save_reg_x x30, 16
+; CHECK-PC-NEXT: .seh_endepilogue
+; CHECK-PC-NEXT: ret
+; CHECK-PC-NEXT: .seh_endfunclet
+; CHECK-PC-NEXT: .seh_endproc
+;
+; CHECK-APPLE-LABEL: func2:
+; CHECK-APPLE: ; %bb.0:
+; CHECK-APPLE-NEXT: sub sp, sp, #32
+; CHECK-APPLE-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-APPLE-NEXT: .cfi_def_cfa_offset 32
+; CHECK-APPLE-NEXT: .cfi_offset w30, -8
+; CHECK-APPLE-NEXT: .cfi_offset w29, -16
+; CHECK-APPLE-NEXT: Lloh2:
+; CHECK-APPLE-NEXT: adrp x0, _trampg at PAGE
+; CHECK-APPLE-NEXT: Lloh3:
+; CHECK-APPLE-NEXT: add x0, x0, _trampg at PAGEOFF
+; CHECK-APPLE-NEXT: Lloh4:
+; CHECK-APPLE-NEXT: adrp x8, _f at PAGE
+; CHECK-APPLE-NEXT: Lloh5:
+; CHECK-APPLE-NEXT: add x8, x8, _f at PAGEOFF
+; CHECK-APPLE-NEXT: add x9, sp, #8
+; CHECK-APPLE-NEXT: add x1, x0, #12
+; CHECK-APPLE-NEXT: stp x9, x8, [x0, #16]
+; CHECK-APPLE-NEXT: mov w8, #544 ; =0x220
+; CHECK-APPLE-NEXT: movk w8, #54815, lsl #16
+; CHECK-APPLE-NEXT: str w8, [x0, #8]
+; CHECK-APPLE-NEXT: mov x8, #143 ; =0x8f
+; CHECK-APPLE-NEXT: movk x8, #22528, lsl #16
+; CHECK-APPLE-NEXT: movk x8, #177, lsl #32
+; CHECK-APPLE-NEXT: movk x8, #22528, lsl #48
+; CHECK-APPLE-NEXT: str x8, [x0]
+; CHECK-APPLE-NEXT: bl ___clear_cache
+; CHECK-APPLE-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-APPLE-NEXT: mov x0, xzr
+; CHECK-APPLE-NEXT: add sp, sp, #32
+; CHECK-APPLE-NEXT: ret
+; CHECK-APPLE-NEXT: .loh AdrpAdd Lloh4, Lloh5
+; CHECK-APPLE-NEXT: .loh AdrpAdd Lloh2, Lloh3
%val = alloca i64
- %nval = bitcast ptr %val to ptr
- ; CHECK: mov w1, #36
- ; CHECK: bl __trampoline_setup
- call void @llvm.init.trampoline(ptr @trampg, ptr @f, ptr %nval)
+ call void @llvm.init.trampoline(ptr @trampg, ptr @f, ptr %val)
%fp = call ptr @llvm.adjust.trampoline(ptr @trampg)
ret i64 0
}
diff --git a/llvm/test/CodeGen/AArch64/win64cc-x18.ll b/llvm/test/CodeGen/AArch64/win64cc-x18.ll
index b3e78cc9bbb8100..4b45c300e9c1d5c 100644
--- a/llvm/test/CodeGen/AArch64/win64cc-x18.ll
+++ b/llvm/test/CodeGen/AArch64/win64cc-x18.ll
@@ -1,35 +1,26 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;; Testing that nest uses x15 on all calling conventions (except Arm64EC)
-;; Testing that x18 is not clobbered when passing pointers with the nest
-;; attribute on windows
-
-; RUN: llc < %s -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,CHECK-NO-X18
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,CHECK-X18
+; RUN: llc < %s -mtriple=aarch64-pc-windows-msvc | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-apple-darwin | FileCheck %s
define dso_local i64 @other(ptr nest %p) #0 {
; CHECK-LABEL: other:
-; CHECK-X18: ldr x0, [x18]
-; CHECK-NO-X18: ldr x0, [x0]
+; CHECK: ldr x0, [x15]
+; CHECK: ret
%r = load i64, ptr %p
-; CHECK: ret
ret i64 %r
}
define dso_local void @func() #0 {
; CHECK-LABEL: func:
-
-
+; CHECK: add x15, sp, #8
+; CHECK: bl {{_?other}}
+; CHECK: ret
entry:
%p = alloca i64
-; CHECK: mov w8, #1
-; CHECK: stp x30, x8, [sp, #-16]
-; CHECK-X18: add x18, sp, #8
store i64 1, ptr %p
-; CHECK-NO-X18: add x0, sp, #8
-; CHECK: bl other
call void @other(ptr nest %p)
-; CHECK: ldr x30, [sp], #16
-; CHECK: ret
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/zero-call-used-regs.ll b/llvm/test/CodeGen/AArch64/zero-call-used-regs.ll
index 4799ea3bcd19f67..986666e015e9ec6 100644
--- a/llvm/test/CodeGen/AArch64/zero-call-used-regs.ll
+++ b/llvm/test/CodeGen/AArch64/zero-call-used-regs.ll
@@ -93,7 +93,7 @@ define dso_local i32 @all_gpr_arg(i32 noundef %a, i32 noundef %b, i32 noundef %c
; CHECK-NEXT: mov x5, #0 // =0x0
; CHECK-NEXT: mov x6, #0 // =0x0
; CHECK-NEXT: mov x7, #0 // =0x0
-; CHECK-NEXT: mov x18, #0 // =0x0
+; CHECK-NEXT: mov x15, #0 // =0x0
; CHECK-NEXT: orr w0, w8, w2
; CHECK-NEXT: mov x2, #0 // =0x0
; CHECK-NEXT: mov x8, #0 // =0x0
@@ -146,7 +146,7 @@ define dso_local i32 @all_arg(i32 noundef %a, i32 noundef %b, i32 noundef %c) lo
; DEFAULT-NEXT: mov x5, #0 // =0x0
; DEFAULT-NEXT: mov x6, #0 // =0x0
; DEFAULT-NEXT: mov x7, #0 // =0x0
-; DEFAULT-NEXT: mov x18, #0 // =0x0
+; DEFAULT-NEXT: mov x15, #0 // =0x0
; DEFAULT-NEXT: movi v0.2d, #0000000000000000
; DEFAULT-NEXT: orr w0, w8, w2
; DEFAULT-NEXT: mov x2, #0 // =0x0
@@ -169,7 +169,7 @@ define dso_local i32 @all_arg(i32 noundef %a, i32 noundef %b, i32 noundef %c) lo
; SVE-OR-SME-NEXT: mov x5, #0 // =0x0
; SVE-OR-SME-NEXT: mov x6, #0 // =0x0
; SVE-OR-SME-NEXT: mov x7, #0 // =0x0
-; SVE-OR-SME-NEXT: mov x18, #0 // =0x0
+; SVE-OR-SME-NEXT: mov x15, #0 // =0x0
; SVE-OR-SME-NEXT: mov z0.d, #0 // =0x0
; SVE-OR-SME-NEXT: orr w0, w8, w2
; SVE-OR-SME-NEXT: mov x2, #0 // =0x0
@@ -196,7 +196,7 @@ define dso_local i32 @all_arg(i32 noundef %a, i32 noundef %b, i32 noundef %c) lo
; STREAMING-COMPAT-NEXT: mov x5, #0 // =0x0
; STREAMING-COMPAT-NEXT: mov x6, #0 // =0x0
; STREAMING-COMPAT-NEXT: mov x7, #0 // =0x0
-; STREAMING-COMPAT-NEXT: mov x18, #0 // =0x0
+; STREAMING-COMPAT-NEXT: mov x15, #0 // =0x0
; STREAMING-COMPAT-NEXT: fmov d0, xzr
; STREAMING-COMPAT-NEXT: orr w0, w8, w2
; STREAMING-COMPAT-NEXT: mov x2, #0 // =0x0
@@ -492,7 +492,7 @@ define dso_local double @all_gpr_arg_float(double noundef %a, float noundef %b)
; CHECK-NEXT: mov x6, #0 // =0x0
; CHECK-NEXT: mov x7, #0 // =0x0
; CHECK-NEXT: mov x8, #0 // =0x0
-; CHECK-NEXT: mov x18, #0 // =0x0
+; CHECK-NEXT: mov x15, #0 // =0x0
; CHECK-NEXT: ret
entry:
@@ -547,7 +547,7 @@ define dso_local double @all_arg_float(double noundef %a, float noundef %b) loca
; DEFAULT-NEXT: mov x6, #0 // =0x0
; DEFAULT-NEXT: mov x7, #0 // =0x0
; DEFAULT-NEXT: mov x8, #0 // =0x0
-; DEFAULT-NEXT: mov x18, #0 // =0x0
+; DEFAULT-NEXT: mov x15, #0 // =0x0
; DEFAULT-NEXT: movi v1.2d, #0000000000000000
; DEFAULT-NEXT: movi v2.2d, #0000000000000000
; DEFAULT-NEXT: movi v3.2d, #0000000000000000
@@ -570,7 +570,7 @@ define dso_local double @all_arg_float(double noundef %a, float noundef %b) loca
; SVE-OR-SME-NEXT: mov x6, #0 // =0x0
; SVE-OR-SME-NEXT: mov x7, #0 // =0x0
; SVE-OR-SME-NEXT: mov x8, #0 // =0x0
-; SVE-OR-SME-NEXT: mov x18, #0 // =0x0
+; SVE-OR-SME-NEXT: mov x15, #0 // =0x0
; SVE-OR-SME-NEXT: mov z1.d, #0 // =0x0
; SVE-OR-SME-NEXT: mov z2.d, #0 // =0x0
; SVE-OR-SME-NEXT: mov z3.d, #0 // =0x0
@@ -597,7 +597,7 @@ define dso_local double @all_arg_float(double noundef %a, float noundef %b) loca
; STREAMING-COMPAT-NEXT: mov x6, #0 // =0x0
; STREAMING-COMPAT-NEXT: mov x7, #0 // =0x0
; STREAMING-COMPAT-NEXT: mov x8, #0 // =0x0
-; STREAMING-COMPAT-NEXT: mov x18, #0 // =0x0
+; STREAMING-COMPAT-NEXT: mov x15, #0 // =0x0
; STREAMING-COMPAT-NEXT: fmov d1, xzr
; STREAMING-COMPAT-NEXT: fmov d2, xzr
; STREAMING-COMPAT-NEXT: fmov d3, xzr
More information about the flang-commits
mailing list