[llvm] [AMDGPU] Fix SP calculations considering growing up stack for dynamic alloca (PR #119168)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 10 22:21:20 PST 2024
https://github.com/easyonaadit updated https://github.com/llvm/llvm-project/pull/119168
>From 19e4f79a094b1a7ce44970a506287d2137205801 Mon Sep 17 00:00:00 2001
From: easyonaadit <aaditya.alokdeshpande at amd.com>
Date: Sat, 7 Dec 2024 16:27:59 +0530
Subject: [PATCH 1/5] changes to old code to refactor them, alignment, zero
sized allocas, returning correct start location
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 35 +++++++----
llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll | 64 +++++++++-----------
2 files changed, 52 insertions(+), 47 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index fc8bbb154d035d..92f8a8f8099967 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4023,10 +4023,25 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
SDValue Size = Tmp2.getOperand(1);
- SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
- Chain = SP.getValue(1);
+ // Start address of the dynamically sized stack object
+ SDValue SPOld = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
+ Chain = SPOld.getValue(1);
MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue();
const TargetFrameLowering *TFL = Subtarget->getFrameLowering();
+ // First we need to align the start address of the stack object to the required alignment.
+ Align StackAlign = TFL->getStackAlign();
+ if (Alignment && *Alignment > StackAlign) {
+ // Formula for aligning address `SPOld` to alignment boundary `align` => alignedSP = (SPOld + (align - 1)) & ~(align - 1)
+ SDValue AlignedValue = DAG.getConstant(Alignment->value(), dl, VT); // the alignment boundary we want to align to
+ SDValue StackAlignMask = DAG.getNode(ISD::SUB, dl, VT, AlignedValue, // StackAlignMask = (align - 1)
+ DAG.getConstant(1, dl, VT));
+ Tmp1 = DAG.getNode(ISD::ADD, dl, VT, SPOld, StackAlignMask); // Tmp1 = (SPold + (align - 1))
+ Tmp1 = DAG.getNode( // Tmp1 now holds the start address aligned to the required value
+ ISD::AND, dl, VT, Tmp1,
+ DAG.getSignedConstant(-(uint64_t)Alignment->value()
+ << Subtarget->getWavefrontSizeLog2(),
+ dl, VT));
+ }
unsigned Opc =
TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp
? ISD::ADD
@@ -4035,19 +4050,17 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
SDValue ScaledSize = DAG.getNode(
ISD::SHL, dl, VT, Size,
DAG.getConstant(Subtarget->getWavefrontSizeLog2(), dl, MVT::i32));
+ // In case the value in %n at runtime is 0, we need to handle that case. There should not be a 0-sized stack object.
+ ScaledSize = DAG.getNode( // size = max(size, 0)
+ ISD::UMAX, dl, VT, ScaledSize,
+ DAG.getConstant(1, dl, VT));
- Align StackAlign = TFL->getStackAlign();
- Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize); // Value
- if (Alignment && *Alignment > StackAlign) {
- Tmp1 = DAG.getNode(
- ISD::AND, dl, VT, Tmp1,
- DAG.getSignedConstant(-(uint64_t)Alignment->value()
- << Subtarget->getWavefrontSizeLog2(),
- dl, VT));
- }
+ Tmp1 = DAG.getNode(Opc, dl, VT, SPOld, ScaledSize); // Value
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
Tmp2 = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
+ // Set Tmp1 to point to the start address of this stack object.
+ Tmp1 = SPOld;
return DAG.getMergeValues({Tmp1, Tmp2}, dl);
}
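
For reference, here is a minimal standalone sketch of the arithmetic this lowering now performs (the type and function names, the concrete wave64 shift, and the power-of-two alignment assumption are illustrative, not taken from the patch): the alloca result is the incoming SP aligned up to the wave-scaled alignment, and the new SP is that start address plus the wave-scaled, non-zero size, since the AMDGPU private stack grows upward.

  #include <algorithm>
  #include <cstdint>

  // Hypothetical helper type and function, for illustration only.
  struct DynAllocaSketch { uint64_t Start; uint64_t NewSP; };

  DynAllocaSketch sketchLowerDynAlloca(uint64_t SP, uint64_t SizeBytes,
                                       uint64_t AlignBytes) {
    const unsigned WaveSizeLog2 = 6;                   // assuming wave64
    uint64_t Start = SP;
    if (AlignBytes > 1) {                              // only over-aligned allocas
      // Align the old SP up: (SP + (align - 1)) & ~(align - 1), applied to the
      // wave-scaled alignment (AlignBytes assumed to be a power of two).
      uint64_t ScaledAlign = AlignBytes << WaveSizeLog2;
      Start = (SP + ScaledAlign - 1) & ~(ScaledAlign - 1);
    }
    // Wave-scale the size; there should not be a 0-sized stack object.
    uint64_t ScaledSize = std::max<uint64_t>(SizeBytes << WaveSizeLog2, 1);
    return {Start, Start + ScaledSize};                // stack grows upward: bump SP past the object
  }

In the common case where the requested alignment does not exceed the stack alignment, Start is simply the incoming SP, which is what the updated checks below reflect: the old s_add_i32/s_mov_b32 pair that handed out the post-increment SP as the object address becomes an s_mov_b32 of the current SP for the object base, followed by an s_add_i32 that bumps s32 past the object.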
diff --git a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
index 85096eb63f46e1..0477d55e9baa36 100644
--- a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
+++ b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
@@ -30,15 +30,14 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache
; MUBUF-NEXT: s_cmp_lg_u32 s9, 0
; MUBUF-NEXT: s_cbranch_scc1 .LBB0_3
; MUBUF-NEXT: ; %bb.2: ; %bb.1
-; MUBUF-NEXT: s_add_i32 s6, s32, 0x1000
-; MUBUF-NEXT: s_lshl_b32 s7, s10, 2
-; MUBUF-NEXT: s_mov_b32 s32, s6
+; MUBUF-NEXT: s_mov_b32 s6, s32
; MUBUF-NEXT: v_mov_b32_e32 v1, 0
-; MUBUF-NEXT: v_mov_b32_e32 v2, s6
-; MUBUF-NEXT: v_mov_b32_e32 v3, 1
+; MUBUF-NEXT: v_mov_b32_e32 v2, 1
+; MUBUF-NEXT: s_lshl_b32 s7, s10, 2
+; MUBUF-NEXT: s_add_i32 s32, s6, 0x1000
+; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], s6
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6 offset:4
; MUBUF-NEXT: s_add_i32 s6, s6, s7
-; MUBUF-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
-; MUBUF-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen offset:4
; MUBUF-NEXT: v_mov_b32_e32 v2, s6
; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
@@ -66,11 +65,11 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache
; FLATSCR-NEXT: s_cmp_lg_u32 s5, 0
; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_3
; FLATSCR-NEXT: ; %bb.2: ; %bb.1
-; FLATSCR-NEXT: s_add_i32 s2, s32, 0x1000
+; FLATSCR-NEXT: s_mov_b32 s2, s32
; FLATSCR-NEXT: v_mov_b32_e32 v1, 0
; FLATSCR-NEXT: v_mov_b32_e32 v2, 1
; FLATSCR-NEXT: s_lshl_b32 s3, s6, 2
-; FLATSCR-NEXT: s_mov_b32 s32, s2
+; FLATSCR-NEXT: s_add_i32 s32, s2, 0x1000
; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s2
; FLATSCR-NEXT: s_add_i32 s2, s2, s3
; FLATSCR-NEXT: scratch_load_dword v2, off, s2
@@ -131,16 +130,14 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache
; MUBUF-NEXT: s_cmp_lg_u32 s4, 0
; MUBUF-NEXT: s_cbranch_scc1 .LBB1_2
; MUBUF-NEXT: ; %bb.1: ; %bb.0
-; MUBUF-NEXT: s_add_i32 s4, s32, 0x1000
-; MUBUF-NEXT: s_and_b32 s4, s4, 0xfffff000
-; MUBUF-NEXT: s_lshl_b32 s5, s5, 2
-; MUBUF-NEXT: s_mov_b32 s32, s4
+; MUBUF-NEXT: s_mov_b32 s4, s32
; MUBUF-NEXT: v_mov_b32_e32 v1, 0
-; MUBUF-NEXT: v_mov_b32_e32 v2, s4
-; MUBUF-NEXT: v_mov_b32_e32 v3, 1
+; MUBUF-NEXT: v_mov_b32_e32 v2, 1
+; MUBUF-NEXT: s_lshl_b32 s5, s5, 2
+; MUBUF-NEXT: s_add_i32 s32, s4, 0x1000
+; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], s4
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s4 offset:4
; MUBUF-NEXT: s_add_i32 s4, s4, s5
-; MUBUF-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
-; MUBUF-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen offset:4
; MUBUF-NEXT: v_mov_b32_e32 v2, s4
; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
@@ -165,12 +162,11 @@ define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reache
; FLATSCR-NEXT: s_cmp_lg_u32 s0, 0
; FLATSCR-NEXT: s_cbranch_scc1 .LBB1_2
; FLATSCR-NEXT: ; %bb.1: ; %bb.0
-; FLATSCR-NEXT: s_add_i32 s0, s32, 0x1000
; FLATSCR-NEXT: v_mov_b32_e32 v1, 0
-; FLATSCR-NEXT: s_and_b32 s0, s0, 0xfffff000
+; FLATSCR-NEXT: s_mov_b32 s0, s32
; FLATSCR-NEXT: v_mov_b32_e32 v2, 1
; FLATSCR-NEXT: s_lshl_b32 s1, s1, 2
-; FLATSCR-NEXT: s_mov_b32 s32, s0
+; FLATSCR-NEXT: s_add_i32 s32, s0, 0x1000
; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
; FLATSCR-NEXT: s_add_i32 s0, s0, s1
; FLATSCR-NEXT: scratch_load_dword v2, off, s0
@@ -230,16 +226,15 @@ define void @func_non_entry_block_static_alloca_align4(ptr addrspace(1) %out, i3
; MUBUF-NEXT: s_and_b64 exec, exec, vcc
; MUBUF-NEXT: s_cbranch_execz .LBB2_3
; MUBUF-NEXT: ; %bb.2: ; %bb.1
-; MUBUF-NEXT: s_add_i32 s6, s32, 0x1000
+; MUBUF-NEXT: s_mov_b32 s6, s32
; MUBUF-NEXT: v_mov_b32_e32 v2, 0
-; MUBUF-NEXT: v_mov_b32_e32 v3, s6
-; MUBUF-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6
; MUBUF-NEXT: v_mov_b32_e32 v2, 1
-; MUBUF-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen offset:4
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6 offset:4
; MUBUF-NEXT: v_lshl_add_u32 v2, v4, 2, s6
; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
; MUBUF-NEXT: v_and_b32_e32 v3, 0x3ff, v31
-; MUBUF-NEXT: s_mov_b32 s32, s6
+; MUBUF-NEXT: s_add_i32 s32, s6, 0x1000
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_add_u32_e32 v2, v2, v3
; MUBUF-NEXT: global_store_dword v[0:1], v2, off
@@ -266,14 +261,14 @@ define void @func_non_entry_block_static_alloca_align4(ptr addrspace(1) %out, i3
; FLATSCR-NEXT: s_and_b64 exec, exec, vcc
; FLATSCR-NEXT: s_cbranch_execz .LBB2_3
; FLATSCR-NEXT: ; %bb.2: ; %bb.1
-; FLATSCR-NEXT: s_add_i32 s2, s32, 0x1000
+; FLATSCR-NEXT: s_mov_b32 s2, s32
; FLATSCR-NEXT: v_mov_b32_e32 v2, 0
; FLATSCR-NEXT: v_mov_b32_e32 v3, 1
; FLATSCR-NEXT: scratch_store_dwordx2 off, v[2:3], s2
; FLATSCR-NEXT: v_lshl_add_u32 v2, v4, 2, s2
; FLATSCR-NEXT: scratch_load_dword v2, v2, off
; FLATSCR-NEXT: v_and_b32_e32 v3, 0x3ff, v31
-; FLATSCR-NEXT: s_mov_b32 s32, s2
+; FLATSCR-NEXT: s_add_i32 s32, s2, 0x1000
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: v_add_u32_e32 v2, v2, v3
; FLATSCR-NEXT: global_store_dword v[0:1], v2, off
@@ -324,17 +319,15 @@ define void @func_non_entry_block_static_alloca_align64(ptr addrspace(1) %out, i
; MUBUF-NEXT: s_and_saveexec_b64 s[4:5], vcc
; MUBUF-NEXT: s_cbranch_execz .LBB3_2
; MUBUF-NEXT: ; %bb.1: ; %bb.0
-; MUBUF-NEXT: s_add_i32 s6, s32, 0x1000
-; MUBUF-NEXT: s_and_b32 s6, s6, 0xfffff000
+; MUBUF-NEXT: s_mov_b32 s6, s32
; MUBUF-NEXT: v_mov_b32_e32 v2, 0
-; MUBUF-NEXT: v_mov_b32_e32 v4, s6
-; MUBUF-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6
; MUBUF-NEXT: v_mov_b32_e32 v2, 1
-; MUBUF-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen offset:4
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6 offset:4
; MUBUF-NEXT: v_lshl_add_u32 v2, v3, 2, s6
; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
; MUBUF-NEXT: v_and_b32_e32 v3, 0x3ff, v31
-; MUBUF-NEXT: s_mov_b32 s32, s6
+; MUBUF-NEXT: s_add_i32 s32, s6, 0x1000
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_add_u32_e32 v2, v2, v3
; MUBUF-NEXT: global_store_dword v[0:1], v2, off
@@ -358,15 +351,14 @@ define void @func_non_entry_block_static_alloca_align64(ptr addrspace(1) %out, i
; FLATSCR-NEXT: s_and_saveexec_b64 s[0:1], vcc
; FLATSCR-NEXT: s_cbranch_execz .LBB3_2
; FLATSCR-NEXT: ; %bb.1: ; %bb.0
-; FLATSCR-NEXT: s_add_i32 s2, s32, 0x1000
-; FLATSCR-NEXT: s_and_b32 s2, s2, 0xfffff000
+; FLATSCR-NEXT: s_mov_b32 s2, s32
; FLATSCR-NEXT: v_mov_b32_e32 v4, 0
; FLATSCR-NEXT: v_mov_b32_e32 v5, 1
; FLATSCR-NEXT: scratch_store_dwordx2 off, v[4:5], s2
; FLATSCR-NEXT: v_lshl_add_u32 v2, v3, 2, s2
; FLATSCR-NEXT: scratch_load_dword v2, v2, off
; FLATSCR-NEXT: v_and_b32_e32 v3, 0x3ff, v31
-; FLATSCR-NEXT: s_mov_b32 s32, s2
+; FLATSCR-NEXT: s_add_i32 s32, s2, 0x1000
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: v_add_u32_e32 v2, v2, v3
; FLATSCR-NEXT: global_store_dword v[0:1], v2, off
>From 969399068837617e642ff954f7b7c7900ba5e5d9 Mon Sep 17 00:00:00 2001
From: easyonaadit <aaditya.alokdeshpande at amd.com>
Date: Mon, 9 Dec 2024 13:54:33 +0530
Subject: [PATCH 2/5] updated code
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 92f8a8f8099967..8e490c40e5a175 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4002,8 +4002,9 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
InVals, /*IsThisReturn=*/false, SDValue());
}
-// This is identical to the default implementation in ExpandDYNAMIC_STACKALLOC,
-// except for applying the wave size scale to the increment amount.
+// This is similar to the default implementation in ExpandDYNAMIC_STACKALLOC,
+// except for considering a stack that grows upward and applying the wave size
+// scale to the increment amount.
SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
SelectionDAG &DAG) const {
const MachineFunction &MF = DAG.getMachineFunction();
@@ -4032,15 +4033,15 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
Align StackAlign = TFL->getStackAlign();
if (Alignment && *Alignment > StackAlign) {
// Formula for aligning address `SPOld` to alignment boundary `align` => alignedSP = (SPOld + (align - 1)) & ~(align - 1)
- SDValue AlignedValue = DAG.getConstant(Alignment->value(), dl, VT); // the alignment boundary we want to align to
- SDValue StackAlignMask = DAG.getNode(ISD::SUB, dl, VT, AlignedValue, // StackAlignMask = (align - 1)
+ SDValue ScaledAlignment = DAG.getSignedConstant((uint64_t)Alignment->value()
+ << Subtarget->getWavefrontSizeLog2(),
+ dl, VT);
+ // SDValue AlignedValue = DAG.getConstant(Alignment->value(), dl, VT); // the alignment boundary we want to align to
+ SDValue StackAlignMask = DAG.getNode(ISD::SUB, dl, VT, ScaledAlignment, // StackAlignMask = (align - 1)
DAG.getConstant(1, dl, VT));
Tmp1 = DAG.getNode(ISD::ADD, dl, VT, SPOld, StackAlignMask); // Tmp1 = (SPold + (align - 1))
Tmp1 = DAG.getNode( // Tmp1 now holds the start address aligned to the required value
- ISD::AND, dl, VT, Tmp1,
- DAG.getSignedConstant(-(uint64_t)Alignment->value()
- << Subtarget->getWavefrontSizeLog2(),
- dl, VT));
+ ISD::AND, dl, VT, Tmp1, ScaledAlignment);
}
unsigned Opc =
TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp
@@ -4051,7 +4052,7 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
ISD::SHL, dl, VT, Size,
DAG.getConstant(Subtarget->getWavefrontSizeLog2(), dl, MVT::i32));
// In case the value in %n at runtime is 0, we need to handle that case. There should not be a 0-sized stack object.
- ScaledSize = DAG.getNode( // size = max(size, 0)
+ ScaledSize = DAG.getNode( // size = max(size, 1)
ISD::UMAX, dl, VT, ScaledSize,
DAG.getConstant(1, dl, VT));
@@ -4059,10 +4060,8 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
Tmp2 = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
- // Set Tmp1 to point to the start address of this stack object.
- Tmp1 = SPOld;
- return DAG.getMergeValues({Tmp1, Tmp2}, dl);
+ return DAG.getMergeValues({SPOld, Tmp2}, dl); // return start address of the stack object
}
SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
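
As a worked example of the scaled-alignment constant introduced above (the variable names mirror ScaledAlignment and StackAlignMask from the hunk; the 128-byte alignment, wave64 shift, and incoming SP value are illustrative assumptions, not taken from the tests), the start address is rounded up with the usual align-up idiom:

  #include <cstdint>

  constexpr uint64_t AlignBytes      = 128;                          // requested alloca alignment
  constexpr unsigned WaveSizeLog2    = 6;                            // wave64
  constexpr uint64_t ScaledAlignment = AlignBytes << WaveSizeLog2;   // 8192 (0x2000)
  constexpr uint64_t StackAlignMask  = ScaledAlignment - 1;          // 0x1fff
  constexpr uint64_t SPOld           = 0x1040;                       // example incoming SP
  constexpr uint64_t AlignedStart    = (SPOld + StackAlignMask) & ~StackAlignMask; // 0x2000
  static_assert(AlignedStart % ScaledAlignment == 0, "start is aligned to the wave-scaled boundary");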
>From 8704a1953cbe930683f45ba12e3567016993b95c Mon Sep 17 00:00:00 2001
From: easyonaadit <aaditya.alokdeshpande at amd.com>
Date: Wed, 11 Dec 2024 10:56:39 +0530
Subject: [PATCH 3/5] temp commit
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 20 ++++++--------------
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8e490c40e5a175..090c702d2c79de 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4003,8 +4003,8 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
}
// This is similar to the default implementation in ExpandDYNAMIC_STACKALLOC,
-// except for considering a stack that grows upward and applying the wave size
-// scale to the increment amount.
+// except for stack growth direction (default: downwards, AMDGPU: upwards) and
+// applying the wave size scale to the increment amount.
SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
SelectionDAG &DAG) const {
const MachineFunction &MF = DAG.getMachineFunction();
@@ -4024,23 +4024,19 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
SDValue Size = Tmp2.getOperand(1);
- // Start address of the dynamically sized stack object
SDValue SPOld = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SPOld.getValue(1);
MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue();
const TargetFrameLowering *TFL = Subtarget->getFrameLowering();
- // First we need to align the start address of the stack object to the required alignment.
Align StackAlign = TFL->getStackAlign();
if (Alignment && *Alignment > StackAlign) {
- // Formula for aligning address `SPOld` to alignment boundary `align` => alignedSP = (SPOld + (align - 1)) & ~(align - 1)
SDValue ScaledAlignment = DAG.getSignedConstant((uint64_t)Alignment->value()
<< Subtarget->getWavefrontSizeLog2(),
dl, VT);
- // SDValue AlignedValue = DAG.getConstant(Alignment->value(), dl, VT); // the alignment boundary we want to align to
- SDValue StackAlignMask = DAG.getNode(ISD::SUB, dl, VT, ScaledAlignment, // StackAlignMask = (align - 1)
+ SDValue StackAlignMask = DAG.getNode(ISD::SUB, dl, VT, ScaledAlignment,
DAG.getConstant(1, dl, VT));
- Tmp1 = DAG.getNode(ISD::ADD, dl, VT, SPOld, StackAlignMask); // Tmp1 = (SPold + (align - 1))
- Tmp1 = DAG.getNode( // Tmp1 now holds the start address aligned to the required value
+ Tmp1 = DAG.getNode(ISD::ADD, dl, VT, SPOld, StackAlignMask);
+ Tmp1 = DAG.getNode(
ISD::AND, dl, VT, Tmp1, ScaledAlignment);
}
unsigned Opc =
@@ -4051,17 +4047,13 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
SDValue ScaledSize = DAG.getNode(
ISD::SHL, dl, VT, Size,
DAG.getConstant(Subtarget->getWavefrontSizeLog2(), dl, MVT::i32));
- // In case the value in %n at runtime is 0, we need to handle that case. There should not be a 0-sized stack object.
- ScaledSize = DAG.getNode( // size = max(size, 1)
- ISD::UMAX, dl, VT, ScaledSize,
- DAG.getConstant(1, dl, VT));
Tmp1 = DAG.getNode(Opc, dl, VT, SPOld, ScaledSize); // Value
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
Tmp2 = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
- return DAG.getMergeValues({SPOld, Tmp2}, dl); // return start address of the stack object
+ return DAG.getMergeValues({SPOld, Tmp2}, dl);
}
SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
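
The updated comment is the crux of the fix: the default ExpandDYNAMIC_STACKALLOC expansion assumes a downward-growing stack, where the decremented SP is also the object's address, while the AMDGPU private stack grows upward, so the object must live at the old SP and only the SP copy moves. A hedged sketch of the two shapes, with plain integers standing in for the SP copies rather than the actual DAG nodes:

  #include <cstdint>

  struct LoweredAlloca { uint64_t ObjectAddr; uint64_t NewSP; };

  // Default expansion shape: the stack grows downward, so the decremented SP
  // is also the address of the new object.
  LoweredAlloca growDown(uint64_t SP, uint64_t Size) {
    uint64_t NewSP = SP - Size;
    return {NewSP, NewSP};
  }

  // AMDGPU shape: the stack grows upward, so the object lives at the old SP
  // and the new SP sits just past its end.
  LoweredAlloca growUp(uint64_t SP, uint64_t Size) {
    return {SP, SP + Size};
  }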
>From f1d71ea231b4af6e85262ffbf43ad9b182dd9e07 Mon Sep 17 00:00:00 2001
From: easyonaadit <aaditya.alokdeshpande at amd.com>
Date: Wed, 11 Dec 2024 10:58:44 +0530
Subject: [PATCH 4/5] clang-format
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 090c702d2c79de..4455488c055e70 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4003,7 +4003,7 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
}
// This is similar to the default implementation in ExpandDYNAMIC_STACKALLOC,
-// except for stack growth direction (default: downwards, AMDGPU: upwards) and 
+// except for stack growth direction (default: downwards, AMDGPU: upwards) and
// applying the wave size scale to the increment amount.
SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
SelectionDAG &DAG) const {
@@ -4034,10 +4034,9 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
<< Subtarget->getWavefrontSizeLog2(),
dl, VT);
SDValue StackAlignMask = DAG.getNode(ISD::SUB, dl, VT, ScaledAlignment,
- DAG.getConstant(1, dl, VT));
+ DAG.getConstant(1, dl, VT));
Tmp1 = DAG.getNode(ISD::ADD, dl, VT, SPOld, StackAlignMask);
- Tmp1 = DAG.getNode(
- ISD::AND, dl, VT, Tmp1, ScaledAlignment);
+ Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, ScaledAlignment);
}
unsigned Opc =
TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp
>From 91b502d689e7de90eda6f7c7c611a1d72f8d83e7 Mon Sep 17 00:00:00 2001
From: easyonaadit <aaditya.alokdeshpande at amd.com>
Date: Wed, 11 Dec 2024 11:50:21 +0530
Subject: [PATCH 5/5] temp commit
---
.../AMDGPU/non-entry-alloca-updated.ll | 406 ++++++++++++++++++
1 file changed, 406 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/non-entry-alloca-updated.ll
diff --git a/llvm/test/CodeGen/AMDGPU/non-entry-alloca-updated.ll b/llvm/test/CodeGen/AMDGPU/non-entry-alloca-updated.ll
new file mode 100644
index 00000000000000..e2663445e43168
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/non-entry-alloca-updated.ll
@@ -0,0 +1,406 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefixes=MUBUF,DEFAULTSIZE %s
+; RUN: sed 's/CODE_OBJECT_VERSION/500/g' %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefixes=MUBUF,DEFAULTSIZE-V5 %s
+; RUN: sed 's/CODE_OBJECT_VERSION/600/g' %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs | FileCheck -check-prefixes=MUBUF,DEFAULTSIZE-V5 %s
+; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -amdgpu-assume-dynamic-stack-object-size=1024 | FileCheck -check-prefixes=MUBUF,ASSUME1024 %s
+; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -amdgpu-assume-dynamic-stack-object-size=1024 | FileCheck -check-prefixes=MUBUF,ASSUME1024 %s
+; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=+enable-flat-scratch | FileCheck -check-prefixes=FLATSCR,DEFAULTSIZE %s
+; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-assume-dynamic-stack-object-size=1024 | FileCheck -check-prefixes=FLATSCR,ASSUME1024 %s
+
+; FIXME: Generated test checks do not check metadata at the end of the
+; function, so this also includes manually added checks.
+
+; Test that we can select a statically sized alloca outside of the
+; entry block.
+
+; FIXME: FunctionLoweringInfo unhelpfully doesn't preserve an
+; alignment less than the stack alignment.
+define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reached_align4(ptr addrspace(1) %out, i32 %arg.cond0, i32 %arg.cond1, i32 %in) #1 {
+; MUBUF-LABEL: kernel_non_entry_block_static_alloca_uniformly_reached_align4:
+; MUBUF: ; %bb.0: ; %entry
+; MUBUF-NEXT: s_add_u32 s0, s0, s9
+; MUBUF-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x8
+; MUBUF-NEXT: s_addc_u32 s1, s1, 0
+; MUBUF-NEXT: s_mov_b32 s33, 0
+; MUBUF-NEXT: s_movk_i32 s32, 0x400
+; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
+; MUBUF-NEXT: s_cmp_lg_u32 s8, 0
+; MUBUF-NEXT: s_cbranch_scc1 .LBB0_3
+; MUBUF-NEXT: ; %bb.1: ; %bb.0
+; MUBUF-NEXT: s_cmp_lg_u32 s9, 0
+; MUBUF-NEXT: s_cbranch_scc1 .LBB0_3
+; MUBUF-NEXT: ; %bb.2: ; %bb.1
+; MUBUF-NEXT: s_mov_b32 s6, s32
+; MUBUF-NEXT: v_mov_b32_e32 v1, 0
+; MUBUF-NEXT: v_mov_b32_e32 v2, 1
+; MUBUF-NEXT: s_lshl_b32 s7, s10, 2
+; MUBUF-NEXT: s_add_i32 s32, s6, 0x1000
+; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], s6
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6 offset:4
+; MUBUF-NEXT: s_add_i32 s6, s6, s7
+; MUBUF-NEXT: v_mov_b32_e32 v2, s6
+; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
+; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: v_add_u32_e32 v0, v2, v0
+; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
+; MUBUF-NEXT: global_store_dword v1, v0, s[4:5]
+; MUBUF-NEXT: .LBB0_3: ; %bb.2
+; MUBUF-NEXT: v_mov_b32_e32 v0, 0
+; MUBUF-NEXT: global_store_dword v[0:1], v0, off
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: s_endpgm
+;
+; FLATSCR-LABEL: kernel_non_entry_block_static_alloca_uniformly_reached_align4:
+; FLATSCR: ; %bb.0: ; %entry
+; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s2, s5
+; FLATSCR-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x8
+; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s3, 0
+; FLATSCR-NEXT: s_mov_b32 s33, 0
+; FLATSCR-NEXT: s_mov_b32 s32, 16
+; FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
+; FLATSCR-NEXT: s_cmp_lg_u32 s4, 0
+; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_3
+; FLATSCR-NEXT: ; %bb.1: ; %bb.0
+; FLATSCR-NEXT: s_cmp_lg_u32 s5, 0
+; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_3
+; FLATSCR-NEXT: ; %bb.2: ; %bb.1
+; FLATSCR-NEXT: s_mov_b32 s2, s32
+; FLATSCR-NEXT: v_mov_b32_e32 v1, 0
+; FLATSCR-NEXT: v_mov_b32_e32 v2, 1
+; FLATSCR-NEXT: s_lshl_b32 s3, s6, 2
+; FLATSCR-NEXT: s_add_i32 s32, s2, 0x1000
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s2
+; FLATSCR-NEXT: s_add_i32 s2, s2, s3
+; FLATSCR-NEXT: scratch_load_dword v2, off, s2
+; FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: v_add_u32_e32 v0, v2, v0
+; FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
+; FLATSCR-NEXT: global_store_dword v1, v0, s[0:1]
+; FLATSCR-NEXT: .LBB0_3: ; %bb.2
+; FLATSCR-NEXT: v_mov_b32_e32 v0, 0
+; FLATSCR-NEXT: global_store_dword v[0:1], v0, off
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: s_endpgm
+
+entry:
+ %cond0 = icmp eq i32 %arg.cond0, 0
+ br i1 %cond0, label %bb.0, label %bb.2
+
+bb.0:
+ %alloca = alloca [16 x i32], align 4, addrspace(5)
+ %gep1 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
+ %cond1 = icmp eq i32 %arg.cond1, 0
+ br i1 %cond1, label %bb.1, label %bb.2
+
+bb.1:
+ ; Use the alloca outside of the defining block.
+ store i32 0, ptr addrspace(5) %alloca
+ store i32 1, ptr addrspace(5) %gep1
+ %gep2 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 %in
+ %load = load i32, ptr addrspace(5) %gep2
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %add = add i32 %load, %tid
+ store i32 %add, ptr addrspace(1) %out
+ br label %bb.2
+
+bb.2:
+ store volatile i32 0, ptr addrspace(1) undef
+ ret void
+}
+; DEFAULTSIZE: .amdhsa_private_segment_fixed_size 4112
+; DEFAULTSIZE: ; ScratchSize: 4112
+; DEFAULTSIZE-V5: .amdhsa_private_segment_fixed_size 16
+; DEFAULTSIZE-V5: .amdhsa_uses_dynamic_stack 1
+; DEFAULTSIZE-V5: ; ScratchSize: 16
+
+; ASSUME1024: .amdhsa_private_segment_fixed_size 1040
+; ASSUME1024: ; ScratchSize: 1040
+
+define amdgpu_kernel void @kernel_non_entry_block_static_alloca_uniformly_reached_align64(ptr addrspace(1) %out, i32 %arg.cond, i32 %in) {
+; MUBUF-LABEL: kernel_non_entry_block_static_alloca_uniformly_reached_align64:
+; MUBUF: ; %bb.0: ; %entry
+; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x8
+; MUBUF-NEXT: s_add_u32 s0, s0, s17
+; MUBUF-NEXT: s_addc_u32 s1, s1, 0
+; MUBUF-NEXT: s_mov_b32 s33, 0
+; MUBUF-NEXT: s_mov_b32 s32, 0x10000
+; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
+; MUBUF-NEXT: s_cmp_lg_u32 s4, 0
+; MUBUF-NEXT: s_cbranch_scc1 .LBB1_2
+; MUBUF-NEXT: ; %bb.1: ; %bb.0
+; MUBUF-NEXT: s_mov_b32 s4, s32
+; MUBUF-NEXT: v_mov_b32_e32 v1, 0
+; MUBUF-NEXT: v_mov_b32_e32 v2, 1
+; MUBUF-NEXT: s_lshl_b32 s5, s5, 2
+; MUBUF-NEXT: s_add_i32 s32, s4, 0x1000
+; MUBUF-NEXT: buffer_store_dword v1, off, s[0:3], s4
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s4 offset:4
+; MUBUF-NEXT: s_add_i32 s4, s4, s5
+; MUBUF-NEXT: v_mov_b32_e32 v2, s4
+; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
+; MUBUF-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: v_add_u32_e32 v0, v2, v0
+; MUBUF-NEXT: s_waitcnt lgkmcnt(0)
+; MUBUF-NEXT: global_store_dword v1, v0, s[4:5]
+; MUBUF-NEXT: .LBB1_2: ; %bb.1
+; MUBUF-NEXT: v_mov_b32_e32 v0, 0
+; MUBUF-NEXT: global_store_dword v[0:1], v0, off
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: s_endpgm
+;
+; FLATSCR-LABEL: kernel_non_entry_block_static_alloca_uniformly_reached_align64:
+; FLATSCR: ; %bb.0: ; %entry
+; FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
+; FLATSCR-NEXT: s_add_u32 flat_scratch_lo, s8, s13
+; FLATSCR-NEXT: s_addc_u32 flat_scratch_hi, s9, 0
+; FLATSCR-NEXT: s_mov_b32 s33, 0
+; FLATSCR-NEXT: s_movk_i32 s32, 0x400
+; FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
+; FLATSCR-NEXT: s_cmp_lg_u32 s0, 0
+; FLATSCR-NEXT: s_cbranch_scc1 .LBB1_2
+; FLATSCR-NEXT: ; %bb.1: ; %bb.0
+; FLATSCR-NEXT: v_mov_b32_e32 v1, 0
+; FLATSCR-NEXT: s_mov_b32 s0, s32
+; FLATSCR-NEXT: v_mov_b32_e32 v2, 1
+; FLATSCR-NEXT: s_lshl_b32 s1, s1, 2
+; FLATSCR-NEXT: s_add_i32 s32, s0, 0x1000
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[1:2], s0
+; FLATSCR-NEXT: s_add_i32 s0, s0, s1
+; FLATSCR-NEXT: scratch_load_dword v2, off, s0
+; FLATSCR-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: v_add_u32_e32 v0, v2, v0
+; FLATSCR-NEXT: s_waitcnt lgkmcnt(0)
+; FLATSCR-NEXT: global_store_dword v1, v0, s[0:1]
+; FLATSCR-NEXT: .LBB1_2: ; %bb.1
+; FLATSCR-NEXT: v_mov_b32_e32 v0, 0
+; FLATSCR-NEXT: global_store_dword v[0:1], v0, off
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: s_endpgm
+entry:
+ %cond = icmp eq i32 %arg.cond, 0
+ br i1 %cond, label %bb.0, label %bb.1
+
+bb.0:
+ %alloca = alloca [16 x i32], align 1024, addrspace(5)
+ %gep1 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
+ store i32 0, ptr addrspace(5) %alloca
+ store i32 1, ptr addrspace(5) %gep1
+ %gep2 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 %in
+ %load = load i32, ptr addrspace(5) %gep2
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %add = add i32 %load, %tid
+ store i32 %add, ptr addrspace(1) %out
+ br label %bb.1
+
+bb.1:
+ store volatile i32 0, ptr addrspace(1) undef
+ ret void
+}
+
+; DEFAULTSIZE: .amdhsa_private_segment_fixed_size 4160
+; DEFAULTSIZE: ; ScratchSize: 4160
+; DEFAULTSIZE-V5: .amdhsa_private_segment_fixed_size 64
+; DEFAULTSIZE-V5: .amdhsa_uses_dynamic_stack 1
+; DEFAULTSIZE-V5: ; ScratchSize: 64
+
+; ASSUME1024: .amdhsa_private_segment_fixed_size 1088
+; ASSUME1024: ; ScratchSize: 1088
+
+
+define void @func_non_entry_block_static_alloca_align4(ptr addrspace(1) %out, i32 %arg.cond0, i32 %arg.cond1, i32 %in) {
+; MUBUF-LABEL: func_non_entry_block_static_alloca_align4:
+; MUBUF: ; %bb.0: ; %entry
+; MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; MUBUF-NEXT: s_mov_b32 s7, s33
+; MUBUF-NEXT: s_add_i32 s33, s32, 0xffc0
+; MUBUF-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; MUBUF-NEXT: s_and_b32 s33, s33, 0xffff0000
+; MUBUF-NEXT: s_add_i32 s32, s32, 0x20000
+; MUBUF-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; MUBUF-NEXT: s_cbranch_execz .LBB2_3
+; MUBUF-NEXT: ; %bb.1: ; %bb.0
+; MUBUF-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; MUBUF-NEXT: s_and_b64 exec, exec, vcc
+; MUBUF-NEXT: s_cbranch_execz .LBB2_3
+; MUBUF-NEXT: ; %bb.2: ; %bb.1
+; MUBUF-NEXT: s_mov_b32 s6, s32
+; MUBUF-NEXT: v_mov_b32_e32 v2, 0
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6
+; MUBUF-NEXT: v_mov_b32_e32 v2, 1
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6 offset:4
+; MUBUF-NEXT: v_lshl_add_u32 v2, v4, 2, s6
+; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
+; MUBUF-NEXT: v_and_b32_e32 v3, 0x3ff, v31
+; MUBUF-NEXT: s_add_i32 s32, s6, 0x1000
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: v_add_u32_e32 v2, v2, v3
+; MUBUF-NEXT: global_store_dword v[0:1], v2, off
+; MUBUF-NEXT: .LBB2_3: ; %bb.2
+; MUBUF-NEXT: s_or_b64 exec, exec, s[4:5]
+; MUBUF-NEXT: v_mov_b32_e32 v0, 0
+; MUBUF-NEXT: global_store_dword v[0:1], v0, off
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: s_add_i32 s32, s32, 0xfffe0000
+; MUBUF-NEXT: s_mov_b32 s33, s7
+; MUBUF-NEXT: s_setpc_b64 s[30:31]
+;
+; FLATSCR-LABEL: func_non_entry_block_static_alloca_align4:
+; FLATSCR: ; %bb.0: ; %entry
+; FLATSCR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; FLATSCR-NEXT: s_mov_b32 s3, s33
+; FLATSCR-NEXT: s_add_i32 s33, s32, 0x3ff
+; FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; FLATSCR-NEXT: s_and_b32 s33, s33, 0xfffffc00
+; FLATSCR-NEXT: s_addk_i32 s32, 0x800
+; FLATSCR-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; FLATSCR-NEXT: s_cbranch_execz .LBB2_3
+; FLATSCR-NEXT: ; %bb.1: ; %bb.0
+; FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
+; FLATSCR-NEXT: s_and_b64 exec, exec, vcc
+; FLATSCR-NEXT: s_cbranch_execz .LBB2_3
+; FLATSCR-NEXT: ; %bb.2: ; %bb.1
+; FLATSCR-NEXT: s_mov_b32 s2, s32
+; FLATSCR-NEXT: v_mov_b32_e32 v2, 0
+; FLATSCR-NEXT: v_mov_b32_e32 v3, 1
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[2:3], s2
+; FLATSCR-NEXT: v_lshl_add_u32 v2, v4, 2, s2
+; FLATSCR-NEXT: scratch_load_dword v2, v2, off
+; FLATSCR-NEXT: v_and_b32_e32 v3, 0x3ff, v31
+; FLATSCR-NEXT: s_add_i32 s32, s2, 0x1000
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: v_add_u32_e32 v2, v2, v3
+; FLATSCR-NEXT: global_store_dword v[0:1], v2, off
+; FLATSCR-NEXT: .LBB2_3: ; %bb.2
+; FLATSCR-NEXT: s_or_b64 exec, exec, s[0:1]
+; FLATSCR-NEXT: v_mov_b32_e32 v0, 0
+; FLATSCR-NEXT: global_store_dword v[0:1], v0, off
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: s_addk_i32 s32, 0xf800
+; FLATSCR-NEXT: s_mov_b32 s33, s3
+; FLATSCR-NEXT: s_setpc_b64 s[30:31]
+
+entry:
+ %cond0 = icmp eq i32 %arg.cond0, 0
+ br i1 %cond0, label %bb.0, label %bb.2
+
+bb.0:
+ %alloca = alloca [16 x i32], align 1024, addrspace(5)
+ %gep1 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
+ %cond1 = icmp eq i32 %arg.cond1, 0
+ br i1 %cond1, label %bb.1, label %bb.2
+
+bb.1:
+ ; Use the alloca outside of the defining block.
+ store i32 0, ptr addrspace(5) %alloca
+ store i32 1, ptr addrspace(5) %gep1
+ %gep2 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 %in
+ %load = load i32, ptr addrspace(5) %gep2
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %add = add i32 %load, %tid
+ store i32 %add, ptr addrspace(1) %out
+ br label %bb.2
+
+bb.2:
+ store volatile i32 0, ptr addrspace(1) undef
+ ret void
+}
+
+define void @func_non_entry_block_static_alloca_align64(ptr addrspace(1) %out, i32 %arg.cond, i32 %in) {
+; MUBUF-LABEL: func_non_entry_block_static_alloca_align64:
+; MUBUF: ; %bb.0: ; %entry
+; MUBUF-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; MUBUF-NEXT: s_mov_b32 s7, s33
+; MUBUF-NEXT: s_add_i32 s33, s32, 0xfc0
+; MUBUF-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; MUBUF-NEXT: s_and_b32 s33, s33, 0xfffff000
+; MUBUF-NEXT: s_addk_i32 s32, 0x2000
+; MUBUF-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; MUBUF-NEXT: s_cbranch_execz .LBB3_2
+; MUBUF-NEXT: ; %bb.1: ; %bb.0
+; MUBUF-NEXT: s_mov_b32 s6, s32
+; MUBUF-NEXT: v_mov_b32_e32 v2, 0
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6
+; MUBUF-NEXT: v_mov_b32_e32 v2, 1
+; MUBUF-NEXT: buffer_store_dword v2, off, s[0:3], s6 offset:4
+; MUBUF-NEXT: v_lshl_add_u32 v2, v3, 2, s6
+; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen
+; MUBUF-NEXT: v_and_b32_e32 v3, 0x3ff, v31
+; MUBUF-NEXT: s_add_i32 s32, s6, 0x1000
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: v_add_u32_e32 v2, v2, v3
+; MUBUF-NEXT: global_store_dword v[0:1], v2, off
+; MUBUF-NEXT: .LBB3_2: ; %bb.1
+; MUBUF-NEXT: s_or_b64 exec, exec, s[4:5]
+; MUBUF-NEXT: v_mov_b32_e32 v0, 0
+; MUBUF-NEXT: global_store_dword v[0:1], v0, off
+; MUBUF-NEXT: s_waitcnt vmcnt(0)
+; MUBUF-NEXT: s_addk_i32 s32, 0xe000
+; MUBUF-NEXT: s_mov_b32 s33, s7
+; MUBUF-NEXT: s_setpc_b64 s[30:31]
+;
+; FLATSCR-LABEL: func_non_entry_block_static_alloca_align64:
+; FLATSCR: ; %bb.0: ; %entry
+; FLATSCR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; FLATSCR-NEXT: s_mov_b32 s3, s33
+; FLATSCR-NEXT: s_add_i32 s33, s32, 63
+; FLATSCR-NEXT: v_cmp_eq_u32_e32 vcc, 0, v2
+; FLATSCR-NEXT: s_andn2_b32 s33, s33, 63
+; FLATSCR-NEXT: s_addk_i32 s32, 0x80
+; FLATSCR-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; FLATSCR-NEXT: s_cbranch_execz .LBB3_2
+; FLATSCR-NEXT: ; %bb.1: ; %bb.0
+; FLATSCR-NEXT: s_mov_b32 s2, s32
+; FLATSCR-NEXT: v_mov_b32_e32 v4, 0
+; FLATSCR-NEXT: v_mov_b32_e32 v5, 1
+; FLATSCR-NEXT: scratch_store_dwordx2 off, v[4:5], s2
+; FLATSCR-NEXT: v_lshl_add_u32 v2, v3, 2, s2
+; FLATSCR-NEXT: scratch_load_dword v2, v2, off
+; FLATSCR-NEXT: v_and_b32_e32 v3, 0x3ff, v31
+; FLATSCR-NEXT: s_add_i32 s32, s2, 0x1000
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: v_add_u32_e32 v2, v2, v3
+; FLATSCR-NEXT: global_store_dword v[0:1], v2, off
+; FLATSCR-NEXT: .LBB3_2: ; %bb.1
+; FLATSCR-NEXT: s_or_b64 exec, exec, s[0:1]
+; FLATSCR-NEXT: v_mov_b32_e32 v0, 0
+; FLATSCR-NEXT: global_store_dword v[0:1], v0, off
+; FLATSCR-NEXT: s_waitcnt vmcnt(0)
+; FLATSCR-NEXT: s_addk_i32 s32, 0xff80
+; FLATSCR-NEXT: s_mov_b32 s33, s3
+; FLATSCR-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %cond = icmp eq i32 %arg.cond, 0
+ br i1 %cond, label %bb.0, label %bb.1
+
+bb.0:
+ %alloca = alloca [16 x i32], align 64, addrspace(5)
+ %gep1 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
+ store i32 0, ptr addrspace(5) %alloca
+ store i32 1, ptr addrspace(5) %gep1
+ %gep2 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 %in
+ %load = load i32, ptr addrspace(5) %gep2
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %add = add i32 %load, %tid
+ store i32 %add, ptr addrspace(1) %out
+ br label %bb.1
+
+bb.1:
+ store volatile i32 0, ptr addrspace(1) undef
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable }
+attributes #1 = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 1, !"amdhsa_code_object_version", i32 CODE_OBJECT_VERSION}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ASSUME1024: {{.*}}
+; DEFAULTSIZE: {{.*}}
+; DEFAULTSIZE-V5: {{.*}}