[llvm] [AArch64][SME] Fix restoring callee-saves from FP with hazard padding (PR #143371)
Benjamin Maxwell via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 13 07:58:17 PDT 2025
https://github.com/MacDue updated https://github.com/llvm/llvm-project/pull/143371
>From c965294a93d97771a2ba2b018a3faf6e933caec0 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Mon, 9 Jun 2025 11:15:08 +0000
Subject: [PATCH 1/4] [AArch64][SME] Fix restoring callee-saves from FP with
hazard padding
Currently, when hazard-padding is enabled a (fixed-size) hazard slot is
placed in the CS area, just after the frame record. The size of this
slot is part of the "CalleeSaveBaseToFrameRecordOffset". The SVE
epilogue emission code assumed this offset was always zero and
incorrectly set the stack pointer, resulting in all SVE registers
being reloaded from incorrect offsets.
```
| prev_lr |
| prev_fp |
| (a.k.a. "frame record") |
|-----------------------------------| <- fp(=x29)
| <hazard padding> |
|-----------------------------------| <- callee-saved base
| |
| callee-saved fp/simd/SVE regs |
| |
|-----------------------------------| <- SVE callee-save base
```
i.e. in the above diagram, the code assumed `fp == callee-saved base`.
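In effect, the epilogue must step down from `fp` by both the fixed
"callee-save base to frame record" offset (which accounts for the hazard
padding) and the scalable SVE callee-save size. A condensed sketch of the
fix, using the same helpers as the diff below (not the verbatim patch):
```c++
// Previously only the scalable component was subtracted from FP, so with
// hazard padding SP landed CalleeSaveBaseToFrameRecordOffset bytes too high.
// Subtract the fixed and scalable components together instead:
emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
                StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(),
                                 -SVECalleeSavedSize),
                TII, MachineInstr::FrameDestroy);
```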
---
.../Target/AArch64/AArch64FrameLowering.cpp | 10 +-
llvm/test/CodeGen/AArch64/stack-hazard.ll | 425 ++++++++++++++++++
2 files changed, 431 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 22683237fa0a8..9d1cb09a1a568 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2539,13 +2539,15 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
// restore the stack pointer from the frame pointer prior to SVE CSR
// restoration.
if (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) {
- if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
+ if (int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
// Set SP to start of SVE callee-save area from which they can
// be reloaded. The code below will deallocate the stack space
// space by moving FP -> SP.
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
- StackOffset::getScalable(-CalleeSavedSize), TII,
- MachineInstr::FrameDestroy);
+ emitFrameOffset(
+ MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
+ StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(),
+ -SVECalleeSavedSize),
+ TII, MachineInstr::FrameDestroy);
}
} else {
if (AFI->getSVECalleeSavedStackSize()) {
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index e169b199733bd..0b92aea51cf62 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -3143,3 +3143,428 @@ entry:
call void @bar(ptr noundef nonnull %b)
ret i32 0
}
+
+
+define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" {
+; CHECK0-LABEL: svecc_call_dynamic_alloca:
+; CHECK0: // %bb.0: // %entry
+; CHECK0-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK0-NEXT: .cfi_def_cfa_offset 64
+; CHECK0-NEXT: cntd x9
+; CHECK0-NEXT: stp x27, x26, [sp, #32] // 16-byte Folded Spill
+; CHECK0-NEXT: stp x9, x28, [sp, #16] // 16-byte Folded Spill
+; CHECK0-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK0-NEXT: mov x29, sp
+; CHECK0-NEXT: .cfi_def_cfa w29, 64
+; CHECK0-NEXT: .cfi_offset w19, -8
+; CHECK0-NEXT: .cfi_offset w20, -16
+; CHECK0-NEXT: .cfi_offset w26, -24
+; CHECK0-NEXT: .cfi_offset w27, -32
+; CHECK0-NEXT: .cfi_offset w28, -40
+; CHECK0-NEXT: .cfi_offset w30, -56
+; CHECK0-NEXT: .cfi_offset w29, -64
+; CHECK0-NEXT: addvl sp, sp, #-18
+; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG
+; CHECK0-NEXT: mov w9, w0
+; CHECK0-NEXT: mov x8, sp
+; CHECK0-NEXT: mov w2, w1
+; CHECK0-NEXT: add x9, x9, #15
+; CHECK0-NEXT: mov x19, sp
+; CHECK0-NEXT: and x9, x9, #0x1fffffff0
+; CHECK0-NEXT: sub x8, x8, x9
+; CHECK0-NEXT: mov sp, x8
+; CHECK0-NEXT: //APP
+; CHECK0-NEXT: //NO_APP
+; CHECK0-NEXT: bl __arm_sme_state
+; CHECK0-NEXT: and x20, x0, #0x1
+; CHECK0-NEXT: .cfi_offset vg, -48
+; CHECK0-NEXT: tbz w20, #0, .LBB35_2
+; CHECK0-NEXT: // %bb.1: // %entry
+; CHECK0-NEXT: smstop sm
+; CHECK0-NEXT: .LBB35_2: // %entry
+; CHECK0-NEXT: mov x0, x8
+; CHECK0-NEXT: mov w1, #45 // =0x2d
+; CHECK0-NEXT: bl memset
+; CHECK0-NEXT: tbz w20, #0, .LBB35_4
+; CHECK0-NEXT: // %bb.3: // %entry
+; CHECK0-NEXT: smstart sm
+; CHECK0-NEXT: .LBB35_4: // %entry
+; CHECK0-NEXT: mov w0, #22647 // =0x5877
+; CHECK0-NEXT: movk w0, #59491, lsl #16
+; CHECK0-NEXT: .cfi_restore vg
+; CHECK0-NEXT: addvl sp, x29, #-18
+; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: .cfi_restore z8
+; CHECK0-NEXT: .cfi_restore z9
+; CHECK0-NEXT: .cfi_restore z10
+; CHECK0-NEXT: .cfi_restore z11
+; CHECK0-NEXT: .cfi_restore z12
+; CHECK0-NEXT: .cfi_restore z13
+; CHECK0-NEXT: .cfi_restore z14
+; CHECK0-NEXT: .cfi_restore z15
+; CHECK0-NEXT: mov sp, x29
+; CHECK0-NEXT: .cfi_def_cfa wsp, 64
+; CHECK0-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr x28, [sp, #24] // 8-byte Folded Reload
+; CHECK0-NEXT: ldp x27, x26, [sp, #32] // 16-byte Folded Reload
+; CHECK0-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK0-NEXT: .cfi_def_cfa_offset 0
+; CHECK0-NEXT: .cfi_restore w19
+; CHECK0-NEXT: .cfi_restore w20
+; CHECK0-NEXT: .cfi_restore w26
+; CHECK0-NEXT: .cfi_restore w27
+; CHECK0-NEXT: .cfi_restore w28
+; CHECK0-NEXT: .cfi_restore w30
+; CHECK0-NEXT: .cfi_restore w29
+; CHECK0-NEXT: ret
+;
+; CHECK64-LABEL: svecc_call_dynamic_alloca:
+; CHECK64: // %bb.0: // %entry
+; CHECK64-NEXT: sub sp, sp, #128
+; CHECK64-NEXT: .cfi_def_cfa_offset 128
+; CHECK64-NEXT: cntd x9
+; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill
+; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill
+; CHECK64-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK64-NEXT: add x29, sp, #64
+; CHECK64-NEXT: .cfi_def_cfa w29, 64
+; CHECK64-NEXT: .cfi_offset w19, -8
+; CHECK64-NEXT: .cfi_offset w20, -16
+; CHECK64-NEXT: .cfi_offset w26, -24
+; CHECK64-NEXT: .cfi_offset w27, -32
+; CHECK64-NEXT: .cfi_offset w28, -40
+; CHECK64-NEXT: .cfi_offset w30, -56
+; CHECK64-NEXT: .cfi_offset w29, -64
+; CHECK64-NEXT: addvl sp, sp, #-18
+; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
+; CHECK64-NEXT: sub sp, sp, #64
+; CHECK64-NEXT: mov w9, w0
+; CHECK64-NEXT: mov x8, sp
+; CHECK64-NEXT: mov w2, w1
+; CHECK64-NEXT: add x9, x9, #15
+; CHECK64-NEXT: mov x19, sp
+; CHECK64-NEXT: and x9, x9, #0x1fffffff0
+; CHECK64-NEXT: sub x8, x8, x9
+; CHECK64-NEXT: mov sp, x8
+; CHECK64-NEXT: //APP
+; CHECK64-NEXT: //NO_APP
+; CHECK64-NEXT: bl __arm_sme_state
+; CHECK64-NEXT: and x20, x0, #0x1
+; CHECK64-NEXT: .cfi_offset vg, -48
+; CHECK64-NEXT: tbz w20, #0, .LBB35_2
+; CHECK64-NEXT: // %bb.1: // %entry
+; CHECK64-NEXT: smstop sm
+; CHECK64-NEXT: .LBB35_2: // %entry
+; CHECK64-NEXT: mov x0, x8
+; CHECK64-NEXT: mov w1, #45 // =0x2d
+; CHECK64-NEXT: bl memset
+; CHECK64-NEXT: tbz w20, #0, .LBB35_4
+; CHECK64-NEXT: // %bb.3: // %entry
+; CHECK64-NEXT: smstart sm
+; CHECK64-NEXT: .LBB35_4: // %entry
+; CHECK64-NEXT: mov w0, #22647 // =0x5877
+; CHECK64-NEXT: movk w0, #59491, lsl #16
+; CHECK64-NEXT: .cfi_restore vg
+; CHECK64-NEXT: sub sp, x29, #64
+; CHECK64-NEXT: addvl sp, sp, #-18
+; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: .cfi_restore z8
+; CHECK64-NEXT: .cfi_restore z9
+; CHECK64-NEXT: .cfi_restore z10
+; CHECK64-NEXT: .cfi_restore z11
+; CHECK64-NEXT: .cfi_restore z12
+; CHECK64-NEXT: .cfi_restore z13
+; CHECK64-NEXT: .cfi_restore z14
+; CHECK64-NEXT: .cfi_restore z15
+; CHECK64-NEXT: sub sp, x29, #64
+; CHECK64-NEXT: .cfi_def_cfa wsp, 128
+; CHECK64-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr x28, [sp, #88] // 8-byte Folded Reload
+; CHECK64-NEXT: ldp x27, x26, [sp, #96] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK64-NEXT: add sp, sp, #128
+; CHECK64-NEXT: .cfi_def_cfa_offset 0
+; CHECK64-NEXT: .cfi_restore w19
+; CHECK64-NEXT: .cfi_restore w20
+; CHECK64-NEXT: .cfi_restore w26
+; CHECK64-NEXT: .cfi_restore w27
+; CHECK64-NEXT: .cfi_restore w28
+; CHECK64-NEXT: .cfi_restore w30
+; CHECK64-NEXT: .cfi_restore w29
+; CHECK64-NEXT: ret
+;
+; CHECK1024-LABEL: svecc_call_dynamic_alloca:
+; CHECK1024: // %bb.0: // %entry
+; CHECK1024-NEXT: sub sp, sp, #1088
+; CHECK1024-NEXT: .cfi_def_cfa_offset 1088
+; CHECK1024-NEXT: cntd x9
+; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x20, [sp, #1072] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x19, [sp, #1080] // 8-byte Folded Spill
+; CHECK1024-NEXT: add x29, sp, #1024
+; CHECK1024-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-NEXT: .cfi_offset w19, -8
+; CHECK1024-NEXT: .cfi_offset w20, -16
+; CHECK1024-NEXT: .cfi_offset w26, -24
+; CHECK1024-NEXT: .cfi_offset w27, -32
+; CHECK1024-NEXT: .cfi_offset w28, -40
+; CHECK1024-NEXT: .cfi_offset w30, -56
+; CHECK1024-NEXT: .cfi_offset w29, -64
+; CHECK1024-NEXT: addvl sp, sp, #-18
+; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
+; CHECK1024-NEXT: sub sp, sp, #1024
+; CHECK1024-NEXT: mov w9, w0
+; CHECK1024-NEXT: mov x8, sp
+; CHECK1024-NEXT: mov w2, w1
+; CHECK1024-NEXT: add x9, x9, #15
+; CHECK1024-NEXT: mov x19, sp
+; CHECK1024-NEXT: and x9, x9, #0x1fffffff0
+; CHECK1024-NEXT: sub x8, x8, x9
+; CHECK1024-NEXT: mov sp, x8
+; CHECK1024-NEXT: //APP
+; CHECK1024-NEXT: //NO_APP
+; CHECK1024-NEXT: bl __arm_sme_state
+; CHECK1024-NEXT: and x20, x0, #0x1
+; CHECK1024-NEXT: .cfi_offset vg, -48
+; CHECK1024-NEXT: tbz w20, #0, .LBB35_2
+; CHECK1024-NEXT: // %bb.1: // %entry
+; CHECK1024-NEXT: smstop sm
+; CHECK1024-NEXT: .LBB35_2: // %entry
+; CHECK1024-NEXT: mov x0, x8
+; CHECK1024-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-NEXT: bl memset
+; CHECK1024-NEXT: tbz w20, #0, .LBB35_4
+; CHECK1024-NEXT: // %bb.3: // %entry
+; CHECK1024-NEXT: smstart sm
+; CHECK1024-NEXT: .LBB35_4: // %entry
+; CHECK1024-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-NEXT: .cfi_restore vg
+; CHECK1024-NEXT: sub sp, x29, #1024
+; CHECK1024-NEXT: addvl sp, sp, #-18
+; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: .cfi_restore z8
+; CHECK1024-NEXT: .cfi_restore z9
+; CHECK1024-NEXT: .cfi_restore z10
+; CHECK1024-NEXT: .cfi_restore z11
+; CHECK1024-NEXT: .cfi_restore z12
+; CHECK1024-NEXT: .cfi_restore z13
+; CHECK1024-NEXT: .cfi_restore z14
+; CHECK1024-NEXT: .cfi_restore z15
+; CHECK1024-NEXT: sub sp, x29, #1024
+; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088
+; CHECK1024-NEXT: ldr x19, [sp, #1080] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x20, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK1024-NEXT: add sp, sp, #1088
+; CHECK1024-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-NEXT: .cfi_restore w19
+; CHECK1024-NEXT: .cfi_restore w20
+; CHECK1024-NEXT: .cfi_restore w26
+; CHECK1024-NEXT: .cfi_restore w27
+; CHECK1024-NEXT: .cfi_restore w28
+; CHECK1024-NEXT: .cfi_restore w30
+; CHECK1024-NEXT: .cfi_restore w29
+; CHECK1024-NEXT: ret
+entry:
+ %ptr = alloca i8, i32 %P1
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ %call = call ptr @memset(ptr noundef nonnull %ptr, i32 noundef 45, i32 noundef %P2)
+ ret i32 -396142473
+}
>From 0cedfe5e8d597f7dcacf785ef25da857954316c2 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Tue, 10 Jun 2025 11:20:01 +0000
Subject: [PATCH 2/4] Avoid unsafe subs
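The first fix computed SP from `fp` in two steps, leaving an intermediate SP
value above the still-unread SVE callee-saves; since memory below SP may be
clobbered asynchronously, that window is presumably the "unsafe subs" of the
subject line. This patch instead restores SP from the base pointer when the
stack is not realigned. A condensed sketch, mirroring the diff below (not the
verbatim patch):
```c++
if (!AFI->isStackRealigned() && RegInfo->hasBasePointer(MF)) {
  // SP = BP + NumBytes lands at the start of the SVE callee-save area
  // in a single safe update (and deallocates the locals in the same step).
  emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP,
                  RegInfo->getBaseRegister(), StackOffset::getFixed(NumBytes),
                  TII, MachineInstr::FrameDestroy);
} else {
  // Otherwise materialise the callee-save base (FP minus the fixed
  // hazard-padding offset) into a scratch register first, then make one
  // SP update covering the scalable SVE callee-save size (see diff).
}
```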
---
.../Target/AArch64/AArch64FrameLowering.cpp | 34 +-
.../Target/AArch64/AArch64RegisterInfo.cpp | 28 +-
llvm/test/CodeGen/AArch64/stack-hazard.ll | 413 +++++++++++++++++-
llvm/test/CodeGen/AArch64/sve-alloca.ll | 2 +-
4 files changed, 452 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 9d1cb09a1a568..76266711823c9 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2541,13 +2541,33 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
if (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) {
if (int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
// Set SP to start of SVE callee-save area from which they can
- // be reloaded. The code below will deallocate the stack space
- // space by moving FP -> SP.
- emitFrameOffset(
- MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
- StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(),
- -SVECalleeSavedSize),
- TII, MachineInstr::FrameDestroy);
+ // be reloaded.
+ const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+ if (!AFI->isStackRealigned() && RegInfo->hasBasePointer(MF)) {
+ // If the stack is not realigned we can use the base pointer to find
+ // the start of the SVE callee-saves (and deallocate locals).
+ emitFrameOffset(
+ MBB, RestoreBegin, DL, AArch64::SP, RegInfo->getBaseRegister(),
+ StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy);
+ } else {
+ Register CalleeSaveBase = AArch64::FP;
+ if (int64_t CalleeSaveBaseOffset =
+ AFI->getCalleeSaveBaseToFrameRecordOffset()) {
+ assert(RegInfo->hasBasePointer(MF) && "Expected base pointer!");
+ // NOTE: This base pointer is clobbered from this point on! The next
+ // step in epilogue emission is restoring callee-saves, so it should
+ // not be used after this point anyway.
+ CalleeSaveBase = RegInfo->getBaseRegister();
+ emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
+ StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
+ MachineInstr::FrameDestroy);
+ }
+ // The code below will deallocate the stack space by moving the
+ // SP to the start of the SVE callee-save area.
+ emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase,
+ StackOffset::getScalable(-SVECalleeSavedSize), TII,
+ MachineInstr::FrameDestroy);
+ }
}
} else {
if (AFI->getSVECalleeSavedStackSize()) {
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index b0c69b8aca806..ceb291f4b25ab 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -635,19 +635,9 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
// Furthermore, if both variable sized objects are present, and the
// stack needs to be dynamically re-aligned, the base pointer is the only
// reliable way to reference the locals.
- if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
- if (hasStackRealignment(MF))
- return true;
-
- auto &ST = MF.getSubtarget<AArch64Subtarget>();
- const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- if (ST.hasSVE() || ST.isStreaming()) {
- // Frames that have variable sized objects and scalable SVE objects,
- // should always use a basepointer.
- if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
- return true;
- }
-
+ bool CannotUseSPForSVERestore =
+ MFI.hasVarSizedObjects() || hasStackRealignment(MF);
+ if (CannotUseSPForSVERestore || MF.hasEHFunclets()) {
// Frames with hazard padding can have a large offset between the frame
// pointer and GPR locals, which includes the emergency spill slot. If the
// emergency spill slot is not within range of the load/store instructions
@@ -655,11 +645,23 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
// Since hasBasePointer() is called before we know if we have hazard padding
// or an emergency spill slot we need to enable the basepointer
// conservatively.
+ auto &ST = MF.getSubtarget<AArch64Subtarget>();
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
if (ST.getStreamingHazardSize() &&
!AFI->getSMEFnAttrs().hasNonStreamingInterfaceAndBody()) {
return true;
}
+ if (hasStackRealignment(MF))
+ return MFI.hasVarSizedObjects() || MF.hasEHFunclets();
+
+ if (ST.hasSVE() || ST.isStreaming()) {
+ // Frames that have variable sized objects and scalable SVE objects,
+ // should always use a basepointer.
+ if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
+ return true;
+ }
+
// Conservatively estimate whether the negative offset from the frame
// pointer will be sufficient to reach. If a function has a smallish
// frame, it's less likely to have lots of spills and callee saved
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 0b92aea51cf62..8394073f0af20 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -3227,7 +3227,7 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x
; CHECK0-NEXT: mov w0, #22647 // =0x5877
; CHECK0-NEXT: movk w0, #59491, lsl #16
; CHECK0-NEXT: .cfi_restore vg
-; CHECK0-NEXT: addvl sp, x29, #-18
+; CHECK0-NEXT: mov sp, x19
; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -3363,8 +3363,7 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x
; CHECK64-NEXT: mov w0, #22647 // =0x5877
; CHECK64-NEXT: movk w0, #59491, lsl #16
; CHECK64-NEXT: .cfi_restore vg
-; CHECK64-NEXT: sub sp, x29, #64
-; CHECK64-NEXT: addvl sp, sp, #-18
+; CHECK64-NEXT: add sp, x19, #64
; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -3505,8 +3504,414 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x
; CHECK1024-NEXT: mov w0, #22647 // =0x5877
; CHECK1024-NEXT: movk w0, #59491, lsl #16
; CHECK1024-NEXT: .cfi_restore vg
+; CHECK1024-NEXT: add sp, x19, #1024
+; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: .cfi_restore z8
+; CHECK1024-NEXT: .cfi_restore z9
+; CHECK1024-NEXT: .cfi_restore z10
+; CHECK1024-NEXT: .cfi_restore z11
+; CHECK1024-NEXT: .cfi_restore z12
+; CHECK1024-NEXT: .cfi_restore z13
+; CHECK1024-NEXT: .cfi_restore z14
+; CHECK1024-NEXT: .cfi_restore z15
; CHECK1024-NEXT: sub sp, x29, #1024
+; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088
+; CHECK1024-NEXT: ldr x19, [sp, #1080] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x20, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK1024-NEXT: add sp, sp, #1088
+; CHECK1024-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-NEXT: .cfi_restore w19
+; CHECK1024-NEXT: .cfi_restore w20
+; CHECK1024-NEXT: .cfi_restore w26
+; CHECK1024-NEXT: .cfi_restore w27
+; CHECK1024-NEXT: .cfi_restore w28
+; CHECK1024-NEXT: .cfi_restore w30
+; CHECK1024-NEXT: .cfi_restore w29
+; CHECK1024-NEXT: ret
+entry:
+ %ptr = alloca i8, i32 %P1
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ %call = call ptr @memset(ptr noundef nonnull %ptr, i32 noundef 45, i32 noundef %P2)
+ ret i32 -396142473
+}
+
+
+define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" {
+; CHECK0-LABEL: svecc_call_realign:
+; CHECK0: // %bb.0: // %entry
+; CHECK0-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK0-NEXT: .cfi_def_cfa_offset 64
+; CHECK0-NEXT: cntd x9
+; CHECK0-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK0-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK0-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK0-NEXT: mov x29, sp
+; CHECK0-NEXT: .cfi_def_cfa w29, 64
+; CHECK0-NEXT: .cfi_offset w19, -8
+; CHECK0-NEXT: .cfi_offset w26, -16
+; CHECK0-NEXT: .cfi_offset w27, -24
+; CHECK0-NEXT: .cfi_offset w28, -32
+; CHECK0-NEXT: .cfi_offset w30, -56
+; CHECK0-NEXT: .cfi_offset w29, -64
+; CHECK0-NEXT: addvl sp, sp, #-18
+; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG
+; CHECK0-NEXT: sub x9, sp, #1024
+; CHECK0-NEXT: and sp, x9, #0xffffffffffffffe0
+; CHECK0-NEXT: mov w2, w1
+; CHECK0-NEXT: //APP
+; CHECK0-NEXT: //NO_APP
+; CHECK0-NEXT: bl __arm_sme_state
+; CHECK0-NEXT: and x19, x0, #0x1
+; CHECK0-NEXT: .cfi_offset vg, -48
+; CHECK0-NEXT: tbz w19, #0, .LBB36_2
+; CHECK0-NEXT: // %bb.1: // %entry
+; CHECK0-NEXT: smstop sm
+; CHECK0-NEXT: .LBB36_2: // %entry
+; CHECK0-NEXT: mov x0, sp
+; CHECK0-NEXT: mov w1, #45 // =0x2d
+; CHECK0-NEXT: bl memset
+; CHECK0-NEXT: tbz w19, #0, .LBB36_4
+; CHECK0-NEXT: // %bb.3: // %entry
+; CHECK0-NEXT: smstart sm
+; CHECK0-NEXT: .LBB36_4: // %entry
+; CHECK0-NEXT: mov w0, #22647 // =0x5877
+; CHECK0-NEXT: movk w0, #59491, lsl #16
+; CHECK0-NEXT: .cfi_restore vg
+; CHECK0-NEXT: addvl sp, x29, #-18
+; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: .cfi_restore z8
+; CHECK0-NEXT: .cfi_restore z9
+; CHECK0-NEXT: .cfi_restore z10
+; CHECK0-NEXT: .cfi_restore z11
+; CHECK0-NEXT: .cfi_restore z12
+; CHECK0-NEXT: .cfi_restore z13
+; CHECK0-NEXT: .cfi_restore z14
+; CHECK0-NEXT: .cfi_restore z15
+; CHECK0-NEXT: mov sp, x29
+; CHECK0-NEXT: .cfi_def_cfa wsp, 64
+; CHECK0-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK0-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK0-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK0-NEXT: .cfi_def_cfa_offset 0
+; CHECK0-NEXT: .cfi_restore w19
+; CHECK0-NEXT: .cfi_restore w26
+; CHECK0-NEXT: .cfi_restore w27
+; CHECK0-NEXT: .cfi_restore w28
+; CHECK0-NEXT: .cfi_restore w30
+; CHECK0-NEXT: .cfi_restore w29
+; CHECK0-NEXT: ret
+;
+; CHECK64-LABEL: svecc_call_realign:
+; CHECK64: // %bb.0: // %entry
+; CHECK64-NEXT: sub sp, sp, #128
+; CHECK64-NEXT: .cfi_def_cfa_offset 128
+; CHECK64-NEXT: cntd x9
+; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill
+; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill
+; CHECK64-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK64-NEXT: add x29, sp, #64
+; CHECK64-NEXT: .cfi_def_cfa w29, 64
+; CHECK64-NEXT: .cfi_offset w19, -8
+; CHECK64-NEXT: .cfi_offset w20, -16
+; CHECK64-NEXT: .cfi_offset w26, -24
+; CHECK64-NEXT: .cfi_offset w27, -32
+; CHECK64-NEXT: .cfi_offset w28, -40
+; CHECK64-NEXT: .cfi_offset w30, -56
+; CHECK64-NEXT: .cfi_offset w29, -64
+; CHECK64-NEXT: addvl sp, sp, #-18
+; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
+; CHECK64-NEXT: sub x9, sp, #1088
+; CHECK64-NEXT: and sp, x9, #0xffffffffffffffe0
+; CHECK64-NEXT: mov x19, sp
+; CHECK64-NEXT: mov w2, w1
+; CHECK64-NEXT: //APP
+; CHECK64-NEXT: //NO_APP
+; CHECK64-NEXT: bl __arm_sme_state
+; CHECK64-NEXT: and x20, x0, #0x1
+; CHECK64-NEXT: .cfi_offset vg, -48
+; CHECK64-NEXT: tbz w20, #0, .LBB36_2
+; CHECK64-NEXT: // %bb.1: // %entry
+; CHECK64-NEXT: smstop sm
+; CHECK64-NEXT: .LBB36_2: // %entry
+; CHECK64-NEXT: add x0, x19, #0
+; CHECK64-NEXT: mov w1, #45 // =0x2d
+; CHECK64-NEXT: bl memset
+; CHECK64-NEXT: tbz w20, #0, .LBB36_4
+; CHECK64-NEXT: // %bb.3: // %entry
+; CHECK64-NEXT: smstart sm
+; CHECK64-NEXT: .LBB36_4: // %entry
+; CHECK64-NEXT: mov w0, #22647 // =0x5877
+; CHECK64-NEXT: movk w0, #59491, lsl #16
+; CHECK64-NEXT: .cfi_restore vg
+; CHECK64-NEXT: sub x19, x29, #64
+; CHECK64-NEXT: addvl sp, x19, #-18
+; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: .cfi_restore z8
+; CHECK64-NEXT: .cfi_restore z9
+; CHECK64-NEXT: .cfi_restore z10
+; CHECK64-NEXT: .cfi_restore z11
+; CHECK64-NEXT: .cfi_restore z12
+; CHECK64-NEXT: .cfi_restore z13
+; CHECK64-NEXT: .cfi_restore z14
+; CHECK64-NEXT: .cfi_restore z15
+; CHECK64-NEXT: sub sp, x29, #64
+; CHECK64-NEXT: .cfi_def_cfa wsp, 128
+; CHECK64-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr x28, [sp, #88] // 8-byte Folded Reload
+; CHECK64-NEXT: ldp x27, x26, [sp, #96] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
+; CHECK64-NEXT: add sp, sp, #128
+; CHECK64-NEXT: .cfi_def_cfa_offset 0
+; CHECK64-NEXT: .cfi_restore w19
+; CHECK64-NEXT: .cfi_restore w20
+; CHECK64-NEXT: .cfi_restore w26
+; CHECK64-NEXT: .cfi_restore w27
+; CHECK64-NEXT: .cfi_restore w28
+; CHECK64-NEXT: .cfi_restore w30
+; CHECK64-NEXT: .cfi_restore w29
+; CHECK64-NEXT: ret
+;
+; CHECK1024-LABEL: svecc_call_realign:
+; CHECK1024: // %bb.0: // %entry
+; CHECK1024-NEXT: sub sp, sp, #1088
+; CHECK1024-NEXT: .cfi_def_cfa_offset 1088
+; CHECK1024-NEXT: cntd x9
+; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x20, [sp, #1072] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x19, [sp, #1080] // 8-byte Folded Spill
+; CHECK1024-NEXT: add x29, sp, #1024
+; CHECK1024-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-NEXT: .cfi_offset w19, -8
+; CHECK1024-NEXT: .cfi_offset w20, -16
+; CHECK1024-NEXT: .cfi_offset w26, -24
+; CHECK1024-NEXT: .cfi_offset w27, -32
+; CHECK1024-NEXT: .cfi_offset w28, -40
+; CHECK1024-NEXT: .cfi_offset w30, -56
+; CHECK1024-NEXT: .cfi_offset w29, -64
; CHECK1024-NEXT: addvl sp, sp, #-18
+; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
+; CHECK1024-NEXT: sub x9, sp, #2048
+; CHECK1024-NEXT: and sp, x9, #0xffffffffffffffe0
+; CHECK1024-NEXT: mov x19, sp
+; CHECK1024-NEXT: mov w2, w1
+; CHECK1024-NEXT: //APP
+; CHECK1024-NEXT: //NO_APP
+; CHECK1024-NEXT: bl __arm_sme_state
+; CHECK1024-NEXT: and x20, x0, #0x1
+; CHECK1024-NEXT: .cfi_offset vg, -48
+; CHECK1024-NEXT: tbz w20, #0, .LBB36_2
+; CHECK1024-NEXT: // %bb.1: // %entry
+; CHECK1024-NEXT: smstop sm
+; CHECK1024-NEXT: .LBB36_2: // %entry
+; CHECK1024-NEXT: add x0, x19, #0
+; CHECK1024-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-NEXT: bl memset
+; CHECK1024-NEXT: tbz w20, #0, .LBB36_4
+; CHECK1024-NEXT: // %bb.3: // %entry
+; CHECK1024-NEXT: smstart sm
+; CHECK1024-NEXT: .LBB36_4: // %entry
+; CHECK1024-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-NEXT: .cfi_restore vg
+; CHECK1024-NEXT: sub x19, x29, #1024
+; CHECK1024-NEXT: addvl sp, x19, #-18
; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -3563,7 +3968,7 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x
; CHECK1024-NEXT: .cfi_restore w29
; CHECK1024-NEXT: ret
entry:
- %ptr = alloca i8, i32 %P1
+ %ptr = alloca i8, i32 1000, align 32
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
%call = call ptr @memset(ptr noundef nonnull %ptr, i32 noundef 45, i32 noundef %P2)
ret i32 -396142473
diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll
index 2520095cce62e..e5a11393f161b 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll
@@ -64,7 +64,7 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
; CHECK-NEXT: str z1, [x0, #1, mul vl]
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: bl bar
-; CHECK-NEXT: addvl sp, x29, #-18
+; CHECK-NEXT: mov sp, x19
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
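The `mov sp, x19` above shows the intended effect: with a base pointer available and no stack realignment, the epilogue can restore SP from the BP with one fixed-offset move instead of FP arithmetic. A minimal sketch of that path, reusing names from the surrounding emitEpilogue code (illustrative only, not the exact final form of this PR):
```
// Sketch: restore SP directly from the base pointer (x19) when the stack
// is not realigned; emitFrameOffset folds a zero/fixed offset into a
// single MOV/ADD.
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
if (!AFI->isStackRealigned() && RegInfo->hasBasePointer(MF))
  emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP,
                  RegInfo->getBaseRegister(),      // x19
                  StackOffset::getFixed(NumBytes), // non-SVE locals size
                  TII, MachineInstr::FrameDestroy);
```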
>From f101ba27fe919f89117ca1536bd4c7e98cd7ceff Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Thu, 12 Jun 2025 10:48:15 +0000
Subject: [PATCH 3/4] Remove BP hack
---
.../Target/AArch64/AArch64FrameLowering.cpp | 25 +++++++++---
.../Target/AArch64/AArch64RegisterInfo.cpp | 28 +++++++------
llvm/test/CodeGen/AArch64/stack-hazard.ll | 39 +++++++------------
3 files changed, 48 insertions(+), 44 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 76266711823c9..515449539c141 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2286,6 +2286,21 @@ static bool isFuncletReturnInstr(const MachineInstr &MI) {
}
}
+/// Find a GPR restored in the epilogue that is not reserved.
+static Register findRestoredCalleeSaveGPR(const MachineFunction &MF) {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
+ for (auto &CS : CSI) {
+ Register Reg = CS.getReg();
+ if (!CS.isRestored() || MRI.isReserved(Reg) ||
+ !AArch64::GPR64RegClass.contains(Reg))
+ continue;
+ return Reg;
+ }
+ return AArch64::NoRegister;
+}
+
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
@@ -2553,11 +2568,11 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
Register CalleeSaveBase = AArch64::FP;
if (int64_t CalleeSaveBaseOffset =
AFI->getCalleeSaveBaseToFrameRecordOffset()) {
- assert(RegInfo->hasBasePointer(MF) && "Expected base pointer!");
- // NOTE: This base pointer is clobbered from this point on! The next
- // step in eplilogue emission restoring callee-saves, so it should
- // not be used after this point anyway.
- CalleeSaveBase = RegInfo->getBaseRegister();
+          // This will find a GPR that is about to be restored -- and is
+          // therefore safe to clobber. SVE functions have a "big stack", so
+          // they always spill at least one GPR (usable as a scratch register).
+ CalleeSaveBase = findRestoredCalleeSaveGPR(MF);
+ assert(CalleeSaveBase != AArch64::NoRegister);
emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
MachineInstr::FrameDestroy);
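Restated outside the diff, the scratch-register step above is two emitFrameOffset calls: the first rebases from the FP past the hazard padding into a GPR the epilogue will reload anyway, the second points SP at the SVE callee-save area. A minimal sketch with the same names as the hunk (assuming a non-zero CalleeSaveBaseOffset, i.e. hazard padding is present):
```
// Sketch: compute the callee-save base below the hazard padding, then
// drop SP to the start of the SVE callee-save area.
Register Scratch = findRestoredCalleeSaveGPR(MF);
assert(Scratch != AArch64::NoRegister && "SVE functions spill >= 1 GPR");
emitFrameOffset(MBB, RestoreBegin, DL, Scratch, AArch64::FP,
                StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
                MachineInstr::FrameDestroy); // Scratch = FP - padding
emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, Scratch,
                StackOffset::getScalable(-SVECalleeSavedSize), TII,
                MachineInstr::FrameDestroy); // SP = Scratch - SVE CS area
```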
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index ceb291f4b25ab..b0c69b8aca806 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -635,9 +635,19 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
// Furthermore, if both variable sized objects are present, and the
// stack needs to be dynamically re-aligned, the base pointer is the only
// reliable way to reference the locals.
- bool CannotUseSPForSVERestore =
- MFI.hasVarSizedObjects() || hasStackRealignment(MF);
- if (CannotUseSPForSVERestore || MF.hasEHFunclets()) {
+ if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
+ if (hasStackRealignment(MF))
+ return true;
+
+ auto &ST = MF.getSubtarget<AArch64Subtarget>();
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ if (ST.hasSVE() || ST.isStreaming()) {
+      // Frames that have variable-sized objects and scalable SVE objects
+      // should always use a basepointer.
+ if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
+ return true;
+ }
+
// Frames with hazard padding can have a large offset between the frame
// pointer and GPR locals, which includes the emergency spill slot. If the
// emergency spill slot is not within range of the load/store instructions
@@ -645,23 +655,11 @@ bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
// Since hasBasePointer() is called before we know if we have hazard padding
// or an emergency spill slot we need to enable the basepointer
// conservatively.
- auto &ST = MF.getSubtarget<AArch64Subtarget>();
- const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
if (ST.getStreamingHazardSize() &&
!AFI->getSMEFnAttrs().hasNonStreamingInterfaceAndBody()) {
return true;
}
- if (hasStackRealignment(MF))
- return MFI.hasVarSizedObjects() || MF.hasEHFunclets();
-
- if (ST.hasSVE() || ST.isStreaming()) {
- // Frames that have variable sized objects and scalable SVE objects,
- // should always use a basepointer.
- if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
- return true;
- }
-
// Conservatively estimate whether the negative offset from the frame
// pointer will be sufficient to reach. If a function has a smallish
// frame, it's less likely to have lots of spills and callee saved
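For readability, the reordered checks above collapse into one early-return chain; a sketch (the real function then falls through to the conservative small-frame offset estimate):
```
// Sketch of hasBasePointer() for frames with variable-sized objects or EH
// funclets after this change.
if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
  if (hasStackRealignment(MF))
    return true; // Unknown alignment padding: need the BP for locals.
  auto &ST = MF.getSubtarget<AArch64Subtarget>();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  if ((ST.hasSVE() || ST.isStreaming()) &&
      (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE()))
    return true; // Scalable SVE locals: need a stable base.
  if (ST.getStreamingHazardSize() &&
      !AFI->getSMEFnAttrs().hasNonStreamingInterfaceAndBody())
    return true; // Hazard padding may put spill slots out of FP range.
  // ...falls through to the existing offset estimate below.
}
```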
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 8394073f0af20..81200f44189d6 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -3703,11 +3703,10 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
; CHECK64-NEXT: stp x9, x28, [sp, #80] // 16-byte Folded Spill
; CHECK64-NEXT: stp x27, x26, [sp, #96] // 16-byte Folded Spill
-; CHECK64-NEXT: stp x20, x19, [sp, #112] // 16-byte Folded Spill
+; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill
; CHECK64-NEXT: add x29, sp, #64
; CHECK64-NEXT: .cfi_def_cfa w29, 64
-; CHECK64-NEXT: .cfi_offset w19, -8
-; CHECK64-NEXT: .cfi_offset w20, -16
+; CHECK64-NEXT: .cfi_offset w19, -16
; CHECK64-NEXT: .cfi_offset w26, -24
; CHECK64-NEXT: .cfi_offset w27, -32
; CHECK64-NEXT: .cfi_offset w28, -40
@@ -3752,21 +3751,20 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
; CHECK64-NEXT: sub x9, sp, #1088
; CHECK64-NEXT: and sp, x9, #0xffffffffffffffe0
-; CHECK64-NEXT: mov x19, sp
; CHECK64-NEXT: mov w2, w1
; CHECK64-NEXT: //APP
; CHECK64-NEXT: //NO_APP
; CHECK64-NEXT: bl __arm_sme_state
-; CHECK64-NEXT: and x20, x0, #0x1
+; CHECK64-NEXT: and x19, x0, #0x1
; CHECK64-NEXT: .cfi_offset vg, -48
-; CHECK64-NEXT: tbz w20, #0, .LBB36_2
+; CHECK64-NEXT: tbz w19, #0, .LBB36_2
; CHECK64-NEXT: // %bb.1: // %entry
; CHECK64-NEXT: smstop sm
; CHECK64-NEXT: .LBB36_2: // %entry
-; CHECK64-NEXT: add x0, x19, #0
+; CHECK64-NEXT: mov x0, sp
; CHECK64-NEXT: mov w1, #45 // =0x2d
; CHECK64-NEXT: bl memset
-; CHECK64-NEXT: tbz w20, #0, .LBB36_4
+; CHECK64-NEXT: tbz w19, #0, .LBB36_4
; CHECK64-NEXT: // %bb.3: // %entry
; CHECK64-NEXT: smstart sm
; CHECK64-NEXT: .LBB36_4: // %entry
@@ -3813,14 +3811,12 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK64-NEXT: .cfi_restore z15
; CHECK64-NEXT: sub sp, x29, #64
; CHECK64-NEXT: .cfi_def_cfa wsp, 128
-; CHECK64-NEXT: ldp x20, x19, [sp, #112] // 16-byte Folded Reload
-; CHECK64-NEXT: ldr x28, [sp, #88] // 8-byte Folded Reload
-; CHECK64-NEXT: ldp x27, x26, [sp, #96] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x26, x19, [sp, #104] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x28, x27, [sp, #88] // 16-byte Folded Reload
; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
; CHECK64-NEXT: add sp, sp, #128
; CHECK64-NEXT: .cfi_def_cfa_offset 0
; CHECK64-NEXT: .cfi_restore w19
-; CHECK64-NEXT: .cfi_restore w20
; CHECK64-NEXT: .cfi_restore w26
; CHECK64-NEXT: .cfi_restore w27
; CHECK64-NEXT: .cfi_restore w28
@@ -3839,12 +3835,10 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x20, [sp, #1072] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x19, [sp, #1080] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
; CHECK1024-NEXT: add x29, sp, #1024
; CHECK1024-NEXT: .cfi_def_cfa w29, 64
-; CHECK1024-NEXT: .cfi_offset w19, -8
-; CHECK1024-NEXT: .cfi_offset w20, -16
+; CHECK1024-NEXT: .cfi_offset w19, -16
; CHECK1024-NEXT: .cfi_offset w26, -24
; CHECK1024-NEXT: .cfi_offset w27, -32
; CHECK1024-NEXT: .cfi_offset w28, -40
@@ -3889,21 +3883,20 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
; CHECK1024-NEXT: sub x9, sp, #2048
; CHECK1024-NEXT: and sp, x9, #0xffffffffffffffe0
-; CHECK1024-NEXT: mov x19, sp
; CHECK1024-NEXT: mov w2, w1
; CHECK1024-NEXT: //APP
; CHECK1024-NEXT: //NO_APP
; CHECK1024-NEXT: bl __arm_sme_state
-; CHECK1024-NEXT: and x20, x0, #0x1
+; CHECK1024-NEXT: and x19, x0, #0x1
; CHECK1024-NEXT: .cfi_offset vg, -48
-; CHECK1024-NEXT: tbz w20, #0, .LBB36_2
+; CHECK1024-NEXT: tbz w19, #0, .LBB36_2
; CHECK1024-NEXT: // %bb.1: // %entry
; CHECK1024-NEXT: smstop sm
; CHECK1024-NEXT: .LBB36_2: // %entry
-; CHECK1024-NEXT: add x0, x19, #0
+; CHECK1024-NEXT: mov x0, sp
; CHECK1024-NEXT: mov w1, #45 // =0x2d
; CHECK1024-NEXT: bl memset
-; CHECK1024-NEXT: tbz w20, #0, .LBB36_4
+; CHECK1024-NEXT: tbz w19, #0, .LBB36_4
; CHECK1024-NEXT: // %bb.3: // %entry
; CHECK1024-NEXT: smstart sm
; CHECK1024-NEXT: .LBB36_4: // %entry
@@ -3950,8 +3943,7 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK1024-NEXT: .cfi_restore z15
; CHECK1024-NEXT: sub sp, x29, #1024
; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088
-; CHECK1024-NEXT: ldr x19, [sp, #1080] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x20, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
@@ -3960,7 +3952,6 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK1024-NEXT: add sp, sp, #1088
; CHECK1024-NEXT: .cfi_def_cfa_offset 0
; CHECK1024-NEXT: .cfi_restore w19
-; CHECK1024-NEXT: .cfi_restore w20
; CHECK1024-NEXT: .cfi_restore w26
; CHECK1024-NEXT: .cfi_restore w27
; CHECK1024-NEXT: .cfi_restore w28
>From b2144edf7e7a3d61964b3ca978dcc0a974332fdd Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Fri, 13 Jun 2025 13:16:17 +0000
Subject: [PATCH 4/4] Rework a little
---
.../Target/AArch64/AArch64FrameLowering.cpp | 120 +++---
llvm/test/CodeGen/AArch64/stack-hazard.ll | 358 +++++++++++++++++-
2 files changed, 419 insertions(+), 59 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 515449539c141..fbbbfd1ac8e43 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2286,21 +2286,6 @@ static bool isFuncletReturnInstr(const MachineInstr &MI) {
}
}
-/// Find a GPR restored in the epilogue that is not reserved.
-static Register findRestoredCalleeSaveGPR(const MachineFunction &MF) {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
- for (auto &CS : CSI) {
- Register Reg = CS.getReg();
- if (!CS.isRestored() || MRI.isReserved(Reg) ||
- !AArch64::GPR64RegClass.contains(Reg))
- continue;
- return Reg;
- }
- return AArch64::NoRegister;
-}
-
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
@@ -2550,49 +2535,69 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
NeedsWinCFI, &HasWinCFI);
} else if (SVEStackSize) {
- // If we have stack realignment or variable sized objects on the stack,
- // restore the stack pointer from the frame pointer prior to SVE CSR
- // restoration.
- if (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) {
- if (int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
- // Set SP to start of SVE callee-save area from which they can
- // be reloaded.
- const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
- if (!AFI->isStackRealigned() && RegInfo->hasBasePointer(MF)) {
- // If the stack is not realigned we can use the base pointer to find
- // the start of the SVE callee-saves (and deallocate locals).
- emitFrameOffset(
- MBB, RestoreBegin, DL, AArch64::SP, RegInfo->getBaseRegister(),
- StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy);
- } else {
- Register CalleeSaveBase = AArch64::FP;
- if (int64_t CalleeSaveBaseOffset =
- AFI->getCalleeSaveBaseToFrameRecordOffset()) {
-          // This will find a GPR that is about to be restored -- and is
-          // therefore safe to clobber. SVE functions have a "big stack", so
-          // they always spill at least one GPR (usable as a scratch register).
- CalleeSaveBase = findRestoredCalleeSaveGPR(MF);
- assert(CalleeSaveBase != AArch64::NoRegister);
- emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
- StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
- MachineInstr::FrameDestroy);
- }
- // The code below will deallocate the stack space space by moving the
- // SP to the start of the SVE callee-save area.
- emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase,
- StackOffset::getScalable(-SVECalleeSavedSize), TII,
- MachineInstr::FrameDestroy);
+ const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+ int64_t SVECalleeSavedSize = AFI->getSVECalleeSavedStackSize();
+ Register BaseForSVERestore = [&]() -> Register {
+      // With stack realignment we must use the FP to restore SVE CSRs (neither
+      // the SP nor the BP can be used due to the unknown alignment padding).
+ if (AFI->isStackRealigned())
+ return AArch64::FP;
+      // With variable-sized objects on the stack, we can use the BP or FP to
+      // restore the SVE callee-saves. If there are no SVE locals the BP will
+      // be more efficient (a single ADD).
+ if (MFI.hasVarSizedObjects()) {
+ if (DeallocateBefore && !AFI->hasStackHazardSlotIndex()) {
+          // If there are SVE locals and no hazard padding we can do:
+ // ADDVL SP, X29, #(-SVECalleeSavedSize)
+ return AArch64::FP;
}
+        // If there are SVE locals and hazard padding, we can choose between:
+        //   SUB TMP, X29, #CalleeSaveBaseOffset
+ // ADDVL SP, TMP, #(-SVECalleeSavedSize)
+ // OR:
+ // ADD SP, BP, #NumBytes
+ // ADDVL SP, SP, #DeallocateBefore
+        // This chooses the latter, as the ADDVL can be omitted if there are no
+        // SVE locals.
+ assert(RegInfo->hasBasePointer(MF) && "Expected base pointer!");
+ return RegInfo->getBaseRegister();
}
- } else {
- if (AFI->getSVECalleeSavedStackSize()) {
+ // In the standard case we use the SP.
+ return AArch64::SP;
+ }();
+
+ if (SVECalleeSavedSize && BaseForSVERestore == AArch64::FP) {
+ Register CalleeSaveBase = AArch64::FP;
+ if (int64_t CalleeSaveBaseOffset =
+ AFI->getCalleeSaveBaseToFrameRecordOffset()) {
+      // If we have a non-zero offset to the non-SVE CS base we need to
+      // compute the base address by subtracting the offset in a temporary
+      // register. SVE functions have a "big stack" so there should be at
+      // least one scratch register available.
+ RegScavenger RS;
+ RS.enterBasicBlockEnd(MBB);
+ RS.backward(MBBI);
+ CalleeSaveBase = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
+ assert(CalleeSaveBase != AArch64::NoRegister);
+ emitFrameOffset(MBB, RestoreBegin, DL, CalleeSaveBase, AArch64::FP,
+ StackOffset::getFixed(-CalleeSaveBaseOffset), TII,
+ MachineInstr::FrameDestroy);
+ }
+    // The code below will deallocate the stack space by moving the
+ // SP to the start of the SVE callee-save area.
+ emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, CalleeSaveBase,
+ StackOffset::getScalable(-SVECalleeSavedSize), TII,
+ MachineInstr::FrameDestroy);
+ } else if (BaseForSVERestore == AArch64::SP || SVECalleeSavedSize) {
+ if (SVECalleeSavedSize) {
// Deallocate the non-SVE locals first before we can deallocate (and
// restore callee saves) from the SVE area.
emitFrameOffset(
- MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
+ MBB, RestoreBegin, DL, AArch64::SP, BaseForSVERestore,
StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy,
false, NeedsWinCFI, &HasWinCFI, EmitCFI && !hasFP(MF),
SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize));
+
NumBytes = 0;
}
@@ -2602,11 +2607,16 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
SVEStackSize +
StackOffset::getFixed(NumBytes + PrologueSaveSize));
- emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
- DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
- NeedsWinCFI, &HasWinCFI, EmitCFI && !hasFP(MF),
- DeallocateAfter +
- StackOffset::getFixed(NumBytes + PrologueSaveSize));
+ if (BaseForSVERestore == AArch64::SP) {
+      // Note: If the base is not the SP it is the base pointer; in that case
+      // the SVE callee-saves are implicitly deallocated when the SP is later
+      // set to the FP to restore the non-SVE callee-saves.
+ emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
+ DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
+ NeedsWinCFI, &HasWinCFI, EmitCFI && !hasFP(MF),
+ DeallocateAfter +
+ StackOffset::getFixed(NumBytes + PrologueSaveSize));
+ }
}
if (EmitCFI)
emitCalleeSavedSVERestores(MBB, RestoreEnd);
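The scavenger calls above replace the bespoke CSI scan from the previous revision. A minimal sketch of the pattern (the enterBasicBlockEnd/backward pair positions the scavenger at the restore point, so any register it reports unused is genuinely dead there):
```
// Sketch: find a dead GPR at the epilogue's restore point. SVE functions
// have a "big stack" and so spill at least one callee-saved GPR, which is
// not yet reloaded here and can serve as scratch.
RegScavenger RS;
RS.enterBasicBlockEnd(MBB); // Start scanning from the end of the block.
RS.backward(MBBI);          // Walk back to the epilogue insertion point.
Register Scratch = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
assert(Scratch != AArch64::NoRegister && "expected a scavengable GPR");
```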
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 81200f44189d6..45a77632b9eef 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -3771,8 +3771,8 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK64-NEXT: mov w0, #22647 // =0x5877
; CHECK64-NEXT: movk w0, #59491, lsl #16
; CHECK64-NEXT: .cfi_restore vg
-; CHECK64-NEXT: sub x19, x29, #64
-; CHECK64-NEXT: addvl sp, x19, #-18
+; CHECK64-NEXT: sub x1, x29, #64
+; CHECK64-NEXT: addvl sp, x1, #-18
; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -3903,8 +3903,8 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK1024-NEXT: mov w0, #22647 // =0x5877
; CHECK1024-NEXT: movk w0, #59491, lsl #16
; CHECK1024-NEXT: .cfi_restore vg
-; CHECK1024-NEXT: sub x19, x29, #1024
-; CHECK1024-NEXT: addvl sp, x19, #-18
+; CHECK1024-NEXT: sub x1, x29, #1024
+; CHECK1024-NEXT: addvl sp, x1, #-18
; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -3964,3 +3964,353 @@ entry:
%call = call ptr @memset(ptr noundef nonnull %ptr, i32 noundef 45, i32 noundef %P2)
ret i32 -396142473
}
+
+
+define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" {
+; CHECK0-LABEL: svecc_call_dynamic_and_scalable_alloca:
+; CHECK0: // %bb.0: // %entry
+; CHECK0-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK0-NEXT: str x28, [sp, #16] // 8-byte Folded Spill
+; CHECK0-NEXT: mov x29, sp
+; CHECK0-NEXT: stp x27, x26, [sp, #32] // 16-byte Folded Spill
+; CHECK0-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK0-NEXT: addvl sp, sp, #-18
+; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK0-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK0-NEXT: sub sp, sp, #48
+; CHECK0-NEXT: addvl sp, sp, #-1
+; CHECK0-NEXT: mov x19, sp
+; CHECK0-NEXT: .cfi_def_cfa w29, 64
+; CHECK0-NEXT: .cfi_offset w19, -8
+; CHECK0-NEXT: .cfi_offset w20, -16
+; CHECK0-NEXT: .cfi_offset w26, -24
+; CHECK0-NEXT: .cfi_offset w27, -32
+; CHECK0-NEXT: .cfi_offset w28, -48
+; CHECK0-NEXT: .cfi_offset w30, -56
+; CHECK0-NEXT: .cfi_offset w29, -64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG
+; CHECK0-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK0-NEXT: ubfiz x8, x0, #2, #32
+; CHECK0-NEXT: mov x9, sp
+; CHECK0-NEXT: add x8, x8, #15
+; CHECK0-NEXT: and x8, x8, #0x7fffffff0
+; CHECK0-NEXT: sub x20, x9, x8
+; CHECK0-NEXT: mov sp, x20
+; CHECK0-NEXT: //APP
+; CHECK0-NEXT: //NO_APP
+; CHECK0-NEXT: add x0, x19, #8
+; CHECK0-NEXT: bl bar
+; CHECK0-NEXT: addvl x0, x29, #-19
+; CHECK0-NEXT: bl bar
+; CHECK0-NEXT: mov x0, x20
+; CHECK0-NEXT: bl bar
+; CHECK0-NEXT: mov w0, #22647 // =0x5877
+; CHECK0-NEXT: movk w0, #59491, lsl #16
+; CHECK0-NEXT: addvl sp, x29, #-18
+; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK0-NEXT: mov sp, x29
+; CHECK0-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK0-NEXT: ldr x28, [sp, #16] // 8-byte Folded Reload
+; CHECK0-NEXT: ldp x27, x26, [sp, #32] // 16-byte Folded Reload
+; CHECK0-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK0-NEXT: ret
+;
+; CHECK64-LABEL: svecc_call_dynamic_and_scalable_alloca:
+; CHECK64: // %bb.0: // %entry
+; CHECK64-NEXT: sub sp, sp, #128
+; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
+; CHECK64-NEXT: add x29, sp, #64
+; CHECK64-NEXT: stp x28, x27, [sp, #80] // 16-byte Folded Spill
+; CHECK64-NEXT: stp x26, x20, [sp, #96] // 16-byte Folded Spill
+; CHECK64-NEXT: str x19, [sp, #112] // 8-byte Folded Spill
+; CHECK64-NEXT: addvl sp, sp, #-18
+; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK64-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK64-NEXT: sub sp, sp, #112
+; CHECK64-NEXT: addvl sp, sp, #-1
+; CHECK64-NEXT: mov x19, sp
+; CHECK64-NEXT: .cfi_def_cfa w29, 64
+; CHECK64-NEXT: .cfi_offset w19, -16
+; CHECK64-NEXT: .cfi_offset w20, -24
+; CHECK64-NEXT: .cfi_offset w26, -32
+; CHECK64-NEXT: .cfi_offset w27, -40
+; CHECK64-NEXT: .cfi_offset w28, -48
+; CHECK64-NEXT: .cfi_offset w30, -56
+; CHECK64-NEXT: .cfi_offset w29, -64
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
+; CHECK64-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK64-NEXT: ubfiz x8, x0, #2, #32
+; CHECK64-NEXT: mov x9, sp
+; CHECK64-NEXT: add x8, x8, #15
+; CHECK64-NEXT: and x8, x8, #0x7fffffff0
+; CHECK64-NEXT: sub x20, x9, x8
+; CHECK64-NEXT: mov sp, x20
+; CHECK64-NEXT: //APP
+; CHECK64-NEXT: //NO_APP
+; CHECK64-NEXT: add x0, x19, #8
+; CHECK64-NEXT: bl bar
+; CHECK64-NEXT: sub x0, x29, #64
+; CHECK64-NEXT: addvl x0, x0, #-19
+; CHECK64-NEXT: bl bar
+; CHECK64-NEXT: mov x0, x20
+; CHECK64-NEXT: bl bar
+; CHECK64-NEXT: mov w0, #22647 // =0x5877
+; CHECK64-NEXT: movk w0, #59491, lsl #16
+; CHECK64-NEXT: add sp, x19, #112
+; CHECK64-NEXT: addvl sp, sp, #1
+; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK64-NEXT: sub sp, x29, #64
+; CHECK64-NEXT: ldp x20, x19, [sp, #104] // 16-byte Folded Reload
+; CHECK64-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
+; CHECK64-NEXT: ldp x27, x26, [sp, #88] // 16-byte Folded Reload
+; CHECK64-NEXT: ldp x30, x28, [sp, #72] // 16-byte Folded Reload
+; CHECK64-NEXT: add sp, sp, #128
+; CHECK64-NEXT: ret
+;
+; CHECK1024-LABEL: svecc_call_dynamic_and_scalable_alloca:
+; CHECK1024: // %bb.0: // %entry
+; CHECK1024-NEXT: sub sp, sp, #1088
+; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK1024-NEXT: add x29, sp, #1024
+; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x28, [sp, #1040] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x27, [sp, #1048] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x26, [sp, #1056] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x20, [sp, #1064] // 8-byte Folded Spill
+; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
+; CHECK1024-NEXT: addvl sp, sp, #-18
+; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK1024-NEXT: sub sp, sp, #1072
+; CHECK1024-NEXT: addvl sp, sp, #-1
+; CHECK1024-NEXT: mov x19, sp
+; CHECK1024-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-NEXT: .cfi_offset w19, -16
+; CHECK1024-NEXT: .cfi_offset w20, -24
+; CHECK1024-NEXT: .cfi_offset w26, -32
+; CHECK1024-NEXT: .cfi_offset w27, -40
+; CHECK1024-NEXT: .cfi_offset w28, -48
+; CHECK1024-NEXT: .cfi_offset w30, -56
+; CHECK1024-NEXT: .cfi_offset w29, -64
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
+; CHECK1024-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK1024-NEXT: ubfiz x8, x0, #2, #32
+; CHECK1024-NEXT: mov x9, sp
+; CHECK1024-NEXT: add x8, x8, #15
+; CHECK1024-NEXT: and x8, x8, #0x7fffffff0
+; CHECK1024-NEXT: sub x20, x9, x8
+; CHECK1024-NEXT: mov sp, x20
+; CHECK1024-NEXT: //APP
+; CHECK1024-NEXT: //NO_APP
+; CHECK1024-NEXT: add x0, x19, #8
+; CHECK1024-NEXT: bl bar
+; CHECK1024-NEXT: sub x0, x29, #1024
+; CHECK1024-NEXT: addvl x0, x0, #-19
+; CHECK1024-NEXT: bl bar
+; CHECK1024-NEXT: mov x0, x20
+; CHECK1024-NEXT: bl bar
+; CHECK1024-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-NEXT: add sp, x19, #1072
+; CHECK1024-NEXT: addvl sp, sp, #1
+; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-NEXT: sub sp, x29, #1024
+; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x20, [sp, #1064] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x26, [sp, #1056] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x27, [sp, #1048] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x28, [sp, #1040] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK1024-NEXT: add sp, sp, #1088
+; CHECK1024-NEXT: ret
+entry:
+ %a = alloca i32, i32 10
+ %b = alloca <vscale x 4 x i32>
+ %c = alloca i32, i32 %P1, align 4
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ call void @bar(ptr noundef nonnull %a)
+ call void @bar(ptr noundef nonnull %b)
+ call void @bar(ptr noundef nonnull %c)
+ ret i32 -396142473
+}