[llvm] [CodeGen] Consider imm offsets when sorting framerefs (PR #166979)
Anshil Gandhi via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 4 12:11:23 PST 2025
https://github.com/gandhi56 updated https://github.com/llvm/llvm-project/pull/166979
>From 76e516d3df048694bfd949792d847c0d6dc36e74 Mon Sep 17 00:00:00 2001
From: Anshil Gandhi <gandhi21299 at gmail.com>
Date: Fri, 7 Nov 2025 12:24:43 -0500
Subject: [PATCH] [CodeGen] Consider imm offsets when sorting framerefs
The LocalStackSlotAllocation pass disallows negative offsets with
respect to a base register. The pass ends up introducing a new
register for such frame references. This patch helps
LocalStackSlotAllocation to additionally consider the immediate
offset of an instruction when sorting frame refs - hence, avoiding
negative offsets and maximizing reuse of the existing registers.
Resolves #155902
---
llvm/lib/CodeGen/LocalStackSlotAllocation.cpp | 42 ++--
.../flat-scratch-alloca-issue-155902.ll | 229 ++++++++++++++++++
...local-stack-alloc-add-references.gfx10.mir | 10 +-
.../local-stack-alloc-block-sp-reference.ll | 11 +-
llvm/test/CodeGen/Thumb/frame-chain.ll | 51 ++--
5 files changed, 300 insertions(+), 43 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/flat-scratch-alloca-issue-155902.ll
diff --git a/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
index 987f64f56403d..4e1c5f1262d0c 100644
--- a/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+++ b/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -51,6 +51,7 @@ namespace {
class FrameRef {
MachineBasicBlock::iterator MI; // Instr referencing the frame
int64_t LocalOffset; // Local offset of the frame idx referenced
+ int64_t InstrOffset; // Offset of the instruction from the frame index
int FrameIdx; // The frame index
// Order reference instruction appears in program. Used to ensure
@@ -59,16 +60,20 @@ namespace {
unsigned Order;
public:
- FrameRef(MachineInstr *I, int64_t Offset, int Idx, unsigned Ord) :
- MI(I), LocalOffset(Offset), FrameIdx(Idx), Order(Ord) {}
+ FrameRef(MachineInstr *I, int64_t Offset, int64_t InstrOffset, int Idx,
+ unsigned Ord)
+ : MI(I), LocalOffset(Offset), InstrOffset(InstrOffset), FrameIdx(Idx),
+ Order(Ord) {}
bool operator<(const FrameRef &RHS) const {
- return std::tie(LocalOffset, FrameIdx, Order) <
- std::tie(RHS.LocalOffset, RHS.FrameIdx, RHS.Order);
+ return std::make_tuple(LocalOffset + InstrOffset, FrameIdx, Order) <
+ std::make_tuple(RHS.LocalOffset + RHS.InstrOffset, RHS.FrameIdx,
+ RHS.Order);
}
MachineBasicBlock::iterator getMachineInstr() const { return MI; }
int64_t getLocalOffset() const { return LocalOffset; }
+ int64_t getInstrOffset() const { return InstrOffset; }
int getFrameIndex() const { return FrameIdx; }
};
@@ -335,20 +340,27 @@ bool LocalStackSlotImpl::insertFrameReferenceRegisters(MachineFunction &Fn) {
// than that, but the increased register pressure makes that a
// tricky thing to balance. Investigate if re-materializing these
// becomes an issue.
- for (const MachineOperand &MO : MI.operands()) {
+ for (unsigned OpIdx = 0, OpEnd = MI.getNumOperands(); OpIdx != OpEnd;
+ ++OpIdx) {
+ const MachineOperand &MO = MI.getOperand(OpIdx);
// Consider replacing all frame index operands that reference
// an object allocated in the local block.
- if (MO.isFI()) {
- // Don't try this with values not in the local block.
- if (!MFI.isObjectPreAllocated(MO.getIndex()))
- break;
- int Idx = MO.getIndex();
- int64_t LocalOffset = LocalOffsets[Idx];
- if (!TRI->needsFrameBaseReg(&MI, LocalOffset))
- break;
- FrameReferenceInsns.push_back(FrameRef(&MI, LocalOffset, Idx, Order++));
+ if (!MO.isFI())
+ continue;
+
+ int FrameIdx = MO.getIndex();
+ // Don't try this with values not in the local block.
+ if (!MFI.isObjectPreAllocated(FrameIdx))
+ break;
+
+ int64_t LocalOffset = LocalOffsets[FrameIdx];
+ if (!TRI->needsFrameBaseReg(&MI, LocalOffset))
break;
- }
+
+ int64_t InstrOffset = TRI->getFrameIndexInstrOffset(&MI, OpIdx);
+ FrameReferenceInsns.emplace_back(&MI, LocalOffset, InstrOffset,
+ FrameIdx, Order++);
+ break;
}
}
}
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-alloca-issue-155902.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch-alloca-issue-155902.ll
new file mode 100644
index 0000000000000..cd31ceea05dfc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-alloca-issue-155902.ll
@@ -0,0 +1,229 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -O0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 < %s | FileCheck %s --check-prefix=GFX950
+
+; Ensure we don't crash with: "Cannot scavenge register in FI elimination!"
+define amdgpu_kernel void @issue155902(i64 %arg, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8, i64 %arg9, i64 %arg10, i64 %arg11, i64 %arg12, i64 %arg13, i64 %arg14, i64 %arg15, i64 %arg16, i64 %arg17, i64 %arg18, i64 %arg19, i64 %arg20, i64 %arg21, i64 %arg22, i64 %arg23, i64 %arg24, i64 %arg25, i64 %arg26, i64 %arg27, i64 %arg28, i64 %arg29, i64 %arg30, i64 %arg31, i64 %arg32, i64 %arg33, i64 %arg34, i64 %arg35, i64 %arg36, i64 %arg37, i64 %arg38, i64 %arg39, i64 %arg40, i64 %arg41, i64 %arg42, i64 %arg43, i64 %arg44, i64 %arg45, i64 %arg46, i64 %arg47, i64 %arg48, i64 %arg49) {
+; GFX950-LABEL: issue155902:
+; GFX950: ; %bb.0: ; %bb
+; GFX950-NEXT: s_mov_b32 s33, 0x4008
+; GFX950-NEXT: ; implicit-def: $vgpr2 : SGPR spill to VGPR lane
+; GFX950-NEXT: v_writelane_b32 v2, s33, 0
+; GFX950-NEXT: s_mov_b64 s[2:3], s[4:5]
+; GFX950-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GFX950-NEXT: s_load_dwordx2 vcc, s[2:3], 0x8
+; GFX950-NEXT: s_load_dwordx2 s[98:99], s[2:3], 0x10
+; GFX950-NEXT: s_load_dwordx2 s[96:97], s[2:3], 0x18
+; GFX950-NEXT: s_load_dwordx2 s[94:95], s[2:3], 0x20
+; GFX950-NEXT: s_load_dwordx2 s[92:93], s[2:3], 0x28
+; GFX950-NEXT: s_load_dwordx2 s[90:91], s[2:3], 0x30
+; GFX950-NEXT: s_load_dwordx2 s[88:89], s[2:3], 0x38
+; GFX950-NEXT: s_load_dwordx2 s[86:87], s[2:3], 0x40
+; GFX950-NEXT: s_load_dwordx2 s[84:85], s[2:3], 0x48
+; GFX950-NEXT: s_load_dwordx2 s[82:83], s[2:3], 0x50
+; GFX950-NEXT: s_load_dwordx2 s[80:81], s[2:3], 0x58
+; GFX950-NEXT: s_load_dwordx2 s[78:79], s[2:3], 0x60
+; GFX950-NEXT: s_load_dwordx2 s[76:77], s[2:3], 0x68
+; GFX950-NEXT: s_load_dwordx2 s[74:75], s[2:3], 0x70
+; GFX950-NEXT: s_load_dwordx2 s[72:73], s[2:3], 0x78
+; GFX950-NEXT: s_load_dwordx2 s[70:71], s[2:3], 0x80
+; GFX950-NEXT: s_load_dwordx2 s[68:69], s[2:3], 0x88
+; GFX950-NEXT: s_load_dwordx2 s[66:67], s[2:3], 0x90
+; GFX950-NEXT: s_load_dwordx2 s[64:65], s[2:3], 0x98
+; GFX950-NEXT: s_load_dwordx2 s[62:63], s[2:3], 0xa0
+; GFX950-NEXT: s_load_dwordx2 s[60:61], s[2:3], 0xa8
+; GFX950-NEXT: s_load_dwordx2 s[58:59], s[2:3], 0xb0
+; GFX950-NEXT: s_load_dwordx2 s[56:57], s[2:3], 0xb8
+; GFX950-NEXT: s_load_dwordx2 s[54:55], s[2:3], 0xc0
+; GFX950-NEXT: s_load_dwordx2 s[52:53], s[2:3], 0xc8
+; GFX950-NEXT: s_load_dwordx2 s[50:51], s[2:3], 0xd0
+; GFX950-NEXT: s_load_dwordx2 s[48:49], s[2:3], 0xd8
+; GFX950-NEXT: s_load_dwordx2 s[46:47], s[2:3], 0xe0
+; GFX950-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0xe8
+; GFX950-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0xf0
+; GFX950-NEXT: s_load_dwordx2 s[40:41], s[2:3], 0xf8
+; GFX950-NEXT: s_load_dwordx2 s[38:39], s[2:3], 0x100
+; GFX950-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x108
+; GFX950-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x110
+; GFX950-NEXT: s_load_dwordx2 s[30:31], s[2:3], 0x118
+; GFX950-NEXT: s_load_dwordx2 s[28:29], s[2:3], 0x120
+; GFX950-NEXT: s_load_dwordx2 s[26:27], s[2:3], 0x128
+; GFX950-NEXT: s_load_dwordx2 s[24:25], s[2:3], 0x130
+; GFX950-NEXT: s_load_dwordx2 s[22:23], s[2:3], 0x138
+; GFX950-NEXT: s_load_dwordx2 s[20:21], s[2:3], 0x140
+; GFX950-NEXT: s_load_dwordx2 s[18:19], s[2:3], 0x148
+; GFX950-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x150
+; GFX950-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x158
+; GFX950-NEXT: s_load_dwordx2 s[12:13], s[2:3], 0x160
+; GFX950-NEXT: s_load_dwordx2 s[10:11], s[2:3], 0x168
+; GFX950-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0x170
+; GFX950-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x178
+; GFX950-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x180
+; GFX950-NEXT: s_nop 0
+; GFX950-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x188
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], 0
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s33 offset:8
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s33
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], 0x384
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s33 offset:16
+; GFX950-NEXT: s_waitcnt lgkmcnt(0)
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX950-NEXT: v_readlane_b32 s0, v2, 0
+; GFX950-NEXT: s_nop 4
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], vcc
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[98:99]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[96:97]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[94:95]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[92:93]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[90:91]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[88:89]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[86:87]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[84:85]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[82:83]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[80:81]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[78:79]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[76:77]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[74:75]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[72:73]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[70:71]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[68:69]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[66:67]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[64:65]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[62:63]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[60:61]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[58:59]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[56:57]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[54:55]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[52:53]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[50:51]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[48:49]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[46:47]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[44:45]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[42:43]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[40:41]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[38:39]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[36:37]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[34:35]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[30:31]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[28:29]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[26:27]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[24:25]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[22:23]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[20:21]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[18:19]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[16:17]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[14:15]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[12:13]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[10:11]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[8:9]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[6:7]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX950-NEXT: scratch_store_dwordx2 off, v[0:1], s0 offset:16
+; GFX950-NEXT: s_endpgm
+bb:
+ %alloca.big = alloca [4096 x i32], align 4, addrspace(5)
+ %alloca304 = alloca [2 x i64], align 8, addrspace(5)
+ %alloca307 = alloca i64, align 8, addrspace(5)
+ store [2 x i64] zeroinitializer, ptr addrspace(5) %alloca304, align 8
+ store i64 900, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg1, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg2, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg3, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg4, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg5, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg6, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg7, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg8, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg9, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg10, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg11, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg12, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg13, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg14, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg15, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg16, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg17, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg18, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg19, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg20, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg21, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg22, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg23, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg24, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg25, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg26, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg27, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg28, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg29, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg30, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg31, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg32, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg33, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg34, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg35, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg36, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg37, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg38, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg39, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg40, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg41, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg42, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg43, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg44, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg45, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg46, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg47, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg48, ptr addrspace(5) %alloca307, align 8
+ store i64 %arg49, ptr addrspace(5) %alloca307, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-add-references.gfx10.mir b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-add-references.gfx10.mir
index 8ea9ec397fe06..3be6456213168 100644
--- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-add-references.gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-add-references.gfx10.mir
@@ -49,15 +49,15 @@ machineFunctionInfo:
body: |
bb.0:
; GFX10-LABEL: name: local_stack_alloc__v_add_u32_e64__literal_offsets_commute
- ; GFX10: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 256
+ ; GFX10: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 100
; GFX10-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
; GFX10-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 killed [[S_MOV_B32_]], [[V_MOV_B32_e32_]], 0, implicit $exec
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[V_ADD_U32_e64_]]
- ; GFX10-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 1245193 /* reguse:VGPR_32 */, [[COPY]]
- ; GFX10-NEXT: [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 256, [[V_ADD_U32_e64_]], 0, implicit $exec
+ ; GFX10-NEXT: [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 156, [[V_ADD_U32_e64_]], 0, implicit $exec
; GFX10-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 1245193 /* reguse:VGPR_32 */, [[V_ADD_U32_e64_1]]
- ; GFX10-NEXT: [[V_ADD_U32_e64_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[V_ADD_U32_e64_]], -156, 0, implicit $exec
+ ; GFX10-NEXT: [[V_ADD_U32_e64_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 412, [[V_ADD_U32_e64_]], 0, implicit $exec
; GFX10-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 1245193 /* reguse:VGPR_32 */, [[V_ADD_U32_e64_2]]
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[V_ADD_U32_e64_]]
+ ; GFX10-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 1245193 /* reguse:VGPR_32 */, [[COPY]]
; GFX10-NEXT: SI_RETURN
;
; GFX12-LABEL: name: local_stack_alloc__v_add_u32_e64__literal_offsets_commute
diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
index 5f0ca7bc42ae0..3d02d70d2fdbb 100644
--- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
@@ -294,12 +294,13 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out
; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s8, s13
; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s9, 0
; FLATSCR-NEXT: v_mov_b32_e32 v0, 0
-; FLATSCR-NEXT: s_mov_b32 s0, 0
-; FLATSCR-NEXT: scratch_store_dword off, v0, s0 offset:1024
+; FLATSCR-NEXT: s_movk_i32 s0, 0x2000
+; FLATSCR-NEXT: scratch_store_dword off, v0, s0
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: s_mov_b32 s0, 0
; FLATSCR-NEXT: .LBB2_1: ; %loadstoreloop
; FLATSCR-NEXT: ; =>This Inner Loop Header: Depth=1
-; FLATSCR-NEXT: s_add_i32 s1, s0, 0x2000
+; FLATSCR-NEXT: s_add_i32 s1, s0, 0x4000
; FLATSCR-NEXT: s_add_i32 s0, s0, 1
; FLATSCR-NEXT: s_cmpk_lt_u32 s0, 0x2120
; FLATSCR-NEXT: scratch_store_byte off, v0, s1
@@ -307,12 +308,12 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out
; FLATSCR-NEXT: s_cbranch_scc1 .LBB2_1
; FLATSCR-NEXT: ; %bb.2: ; %split
; FLATSCR-NEXT: s_movk_i32 s0, 0x1000
-; FLATSCR-NEXT: s_addk_i32 s0, 0x2000
+; FLATSCR-NEXT: s_addk_i32 s0, 0x4000
; FLATSCR-NEXT: scratch_load_dwordx2 v[8:9], off, s0 offset:720 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 offset:704 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
-; FLATSCR-NEXT: s_movk_i32 s0, 0x2000
+; FLATSCR-NEXT: s_movk_i32 s0, 0x4000
; FLATSCR-NEXT: scratch_load_dwordx2 v[10:11], off, s0 offset:16 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: scratch_load_dwordx4 v[4:7], off, s0 glc
diff --git a/llvm/test/CodeGen/Thumb/frame-chain.ll b/llvm/test/CodeGen/Thumb/frame-chain.ll
index a680f2fa4a481..26d214da95a13 100644
--- a/llvm/test/CodeGen/Thumb/frame-chain.ll
+++ b/llvm/test/CodeGen/Thumb/frame-chain.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc -mtriple thumbv6m-arm-none-eabi -filetype asm -o - %s -frame-pointer=all --verify-machineinstrs | FileCheck %s --check-prefixes=FP,LEAF-FP
; RUN: llc -mtriple thumbv6m-arm-none-eabi -filetype asm -o - %s -frame-pointer=all -mattr=+aapcs-frame-chain --verify-machineinstrs | FileCheck %s --check-prefixes=FP-AAPCS,LEAF-FP-AAPCS
; RUN: llc -mtriple thumbv6m-arm-none-eabi -filetype asm -o - %s -frame-pointer=non-leaf --verify-machineinstrs | FileCheck %s --check-prefixes=FP,LEAF-NOFP
@@ -8,16 +9,16 @@
define dso_local noundef i32 @leaf(i32 noundef %0) {
; LEAF-FP-LABEL: leaf:
; LEAF-FP: @ %bb.0:
-; LEAF-FP-NEXT: .save {r7, lr}
-; LEAF-FP-NEXT: push {r7, lr}
-; LEAF-FP-NEXT: .setfp r7, sp
-; LEAF-FP-NEXT: add r7, sp, #0
-; LEAF-FP-NEXT: .pad #4
-; LEAF-FP-NEXT: sub sp, #4
-; LEAF-FP-NEXT: str r0, [sp]
-; LEAF-FP-NEXT: adds r0, r0, #4
-; LEAF-FP-NEXT: add sp, #4
-; LEAF-FP-NEXT: pop {r7, pc}
+; LEAF-FP-NEXT: .save {r7, lr}
+; LEAF-FP-NEXT: push {r7, lr}
+; LEAF-FP-NEXT: .setfp r7, sp
+; LEAF-FP-NEXT: add r7, sp, #0
+; LEAF-FP-NEXT: .pad #4
+; LEAF-FP-NEXT: sub sp, #4
+; LEAF-FP-NEXT: str r0, [sp]
+; LEAF-FP-NEXT: adds r0, r0, #4
+; LEAF-FP-NEXT: add sp, #4
+; LEAF-FP-NEXT: pop {r7, pc}
;
; LEAF-FP-AAPCS-LABEL: leaf:
; LEAF-FP-AAPCS: @ %bb.0:
@@ -54,6 +55,24 @@ define dso_local noundef i32 @leaf(i32 noundef %0) {
; LEAF-NOFP-AAPCS-NEXT: adds r0, r0, #4
; LEAF-NOFP-AAPCS-NEXT: add sp, #4
; LEAF-NOFP-AAPCS-NEXT: bx lr
+;
+; NOFP-LABEL: leaf:
+; NOFP: @ %bb.0:
+; NOFP-NEXT: .pad #4
+; NOFP-NEXT: sub sp, #4
+; NOFP-NEXT: str r0, [sp]
+; NOFP-NEXT: adds r0, r0, #4
+; NOFP-NEXT: add sp, #4
+; NOFP-NEXT: bx lr
+;
+; NOFP-AAPCS-LABEL: leaf:
+; NOFP-AAPCS: @ %bb.0:
+; NOFP-AAPCS-NEXT: .pad #4
+; NOFP-AAPCS-NEXT: sub sp, #4
+; NOFP-AAPCS-NEXT: str r0, [sp]
+; NOFP-AAPCS-NEXT: adds r0, r0, #4
+; NOFP-AAPCS-NEXT: add sp, #4
+; NOFP-AAPCS-NEXT: bx lr
%2 = alloca i32, align 4
store i32 %0, ptr %2, align 4
%3 = load i32, ptr %2, align 4
@@ -150,8 +169,7 @@ define dso_local void @required_fp(i32 %0, i32 %1) {
; FP-NEXT: subs r1, r3, r1
; FP-NEXT: mov sp, r1
; FP-NEXT: movs r1, #0
-; FP-NEXT: str r1, [r6, #4]
-; FP-NEXT: str r0, [r2]
+; FP-NEXT: stm r2!, {r0, r1}
; FP-NEXT: subs r6, r7, #7
; FP-NEXT: subs r6, #1
; FP-NEXT: mov sp, r6
@@ -184,8 +202,7 @@ define dso_local void @required_fp(i32 %0, i32 %1) {
; FP-AAPCS-NEXT: subs r1, r3, r1
; FP-AAPCS-NEXT: mov sp, r1
; FP-AAPCS-NEXT: movs r1, #0
-; FP-AAPCS-NEXT: str r1, [r6, #4]
-; FP-AAPCS-NEXT: str r0, [r2]
+; FP-AAPCS-NEXT: stm r2!, {r0, r1}
; FP-AAPCS-NEXT: mov r6, r11
; FP-AAPCS-NEXT: subs r6, #8
; FP-AAPCS-NEXT: mov sp, r6
@@ -216,8 +233,7 @@ define dso_local void @required_fp(i32 %0, i32 %1) {
; NOFP-NEXT: subs r1, r3, r1
; NOFP-NEXT: mov sp, r1
; NOFP-NEXT: movs r1, #0
-; NOFP-NEXT: str r1, [r6, #4]
-; NOFP-NEXT: str r0, [r2]
+; NOFP-NEXT: stm r2!, {r0, r1}
; NOFP-NEXT: subs r6, r7, #7
; NOFP-NEXT: subs r6, #1
; NOFP-NEXT: mov sp, r6
@@ -250,8 +266,7 @@ define dso_local void @required_fp(i32 %0, i32 %1) {
; NOFP-AAPCS-NEXT: subs r1, r3, r1
; NOFP-AAPCS-NEXT: mov sp, r1
; NOFP-AAPCS-NEXT: movs r1, #0
-; NOFP-AAPCS-NEXT: str r1, [r6, #4]
-; NOFP-AAPCS-NEXT: str r0, [r2]
+; NOFP-AAPCS-NEXT: stm r2!, {r0, r1}
; NOFP-AAPCS-NEXT: mov r6, r11
; NOFP-AAPCS-NEXT: subs r6, #8
; NOFP-AAPCS-NEXT: mov sp, r6
More information about the llvm-commits
mailing list