[llvm] Support dynamically sized allocas (PR #121047)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 24 02:10:28 PST 2024
https://github.com/easyonaadit created https://github.com/llvm/llvm-project/pull/121047
Stack space is computed by applying a wave-wide reduction on the alloca's size argument.
>From 93c0a23582c4b6f89aebdff1ca3768ce3424c97d Mon Sep 17 00:00:00 2001
From: easyonaadit <aaditya.alokdeshpande at amd.com>
Date: Tue, 24 Dec 2024 11:47:38 +0530
Subject: [PATCH] Supporting dynamic sized allocas
---
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 11 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 79 +-
llvm/lib/Target/AMDGPU/SIISelLowering.h | 4 +-
.../GlobalISel/dynamic-alloca-divergent.ll | 278 +++-
.../regbankselect-dyn-stackalloc.mir | 129 ++
.../test/CodeGen/AMDGPU/dynamic_stackalloc.ll | 1360 ++++++++++++++++-
.../CodeGen/AMDGPU/dynamic_stackalloc_isel.ll | 170 +++
7 files changed, 1930 insertions(+), 101 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/dynamic_stackalloc_isel.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index d94c400ad14225..bdc737958b0e96 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1190,9 +1190,14 @@ bool AMDGPURegisterBankInfo::applyMappingDynStackAlloc(
const RegisterBank *SizeBank = getRegBank(AllocSize, MRI, *TRI);
- // TODO: Need to emit a wave reduction to get the maximum size.
- if (SizeBank != &AMDGPU::SGPRRegBank)
- return false;
+ if (SizeBank != &AMDGPU::SGPRRegBank) {
+ auto WaveReduction =
+ B.buildIntrinsic(Intrinsic::amdgcn_wave_reduce_umax,
+ {LLT::scalar(MRI.getType(AllocSize).getSizeInBits())})
+ .addUse(AllocSize)
+ .addImm(0);
+ AllocSize = WaveReduction.getReg(0);
+ }
LLT PtrTy = MRI.getType(Dst);
LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 58b061f5c1af0d..c34c6e4cf7164f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4017,29 +4017,64 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
}
// This is similar to the default implementation in ExpandDYNAMIC_STACKALLOC,
-// except for stack growth direction(default: downwards, AMDGPU: upwards) and
-// applying the wave size scale to the increment amount.
+// except for stack growth direction (default: downwards, AMDGPU: upwards),
+// applying the wave size scale to the increment amount,
+// and performing a wave-reduction for divergent allocation size.
SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
+ SDValue BaseAddr,
+ SDValue Chain,
+ Register SPReg,
SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+ SDValue Tmp = Op.getValue(1);
+ SDValue Size = Tmp.getOperand(1);
+
+ SDValue NewSP;
+ if (isa<ConstantSDNode>(Op.getOperand(1))) {
+ SDValue ScaledSize = DAG.getNode(
+ ISD::SHL, dl, VT, Size,
+ DAG.getConstant(Subtarget->getWavefrontSizeLog2(), dl, MVT::i32));
+ NewSP = DAG.getNode(ISD::ADD, dl, VT, BaseAddr, ScaledSize); // Value
+ } else {
+    // Perform a wave reduction to get the maximum allocation size across lanes.
+ SDValue WaveReduction =
+ DAG.getTargetConstant(Intrinsic::amdgcn_wave_reduce_umax, dl, MVT::i32);
+ Size = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, WaveReduction,
+ Size, DAG.getConstant(0, dl, MVT::i32));
+ SDValue ScaledSize = DAG.getNode(
+ ISD::SHL, dl, VT, Size,
+ DAG.getConstant(Subtarget->getWavefrontSizeLog2(), dl, MVT::i32));
+ NewSP =
+ DAG.getNode(ISD::ADD, dl, VT, BaseAddr, ScaledSize); // Value in vgpr.
+ SDValue ReadFirstLaneID =
+ DAG.getTargetConstant(Intrinsic::amdgcn_readfirstlane, dl, MVT::i32);
+ NewSP = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, ReadFirstLaneID,
+ NewSP);
+ }
+
+ Chain = DAG.getCopyToReg(Chain, dl, SPReg, NewSP); // Output chain
+ Tmp = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
+
+ return DAG.getMergeValues({BaseAddr, Tmp}, dl);
+}
+
+SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
+ SelectionDAG &DAG) const {
const MachineFunction &MF = DAG.getMachineFunction();
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
SDLoc dl(Op);
EVT VT = Op.getValueType();
- SDValue Tmp1 = Op;
- SDValue Tmp2 = Op.getValue(1);
- SDValue Tmp3 = Op.getOperand(2);
- SDValue Chain = Tmp1.getOperand(0);
-
+ SDValue Chain = Op.getOperand(0);
Register SPReg = Info->getStackPtrOffsetReg();
// Chain the dynamic stack allocation so that it doesn't modify the stack
// pointer when other instructions are using the stack.
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
- SDValue Size = Tmp2.getOperand(1);
SDValue BaseAddr = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
- Align Alignment = cast<ConstantSDNode>(Tmp3)->getAlignValue();
+ Align Alignment = cast<ConstantSDNode>(Op.getOperand(2))->getAlignValue();
const TargetFrameLowering *TFL = Subtarget->getFrameLowering();
assert(TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp &&
@@ -4057,30 +4092,8 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(SDValue Op,
DAG.getSignedConstant(-ScaledAlignment, dl, VT));
}
- SDValue ScaledSize = DAG.getNode(
- ISD::SHL, dl, VT, Size,
- DAG.getConstant(Subtarget->getWavefrontSizeLog2(), dl, MVT::i32));
-
- SDValue NewSP = DAG.getNode(ISD::ADD, dl, VT, BaseAddr, ScaledSize); // Value
-
- Chain = DAG.getCopyToReg(Chain, dl, SPReg, NewSP); // Output chain
- Tmp2 = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
-
- return DAG.getMergeValues({BaseAddr, Tmp2}, dl);
-}
-
-SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
- SelectionDAG &DAG) const {
- // We only handle constant sizes here to allow non-entry block, static sized
- // allocas. A truly dynamic value is more difficult to support because we
- // don't know if the size value is uniform or not. If the size isn't uniform,
- // we would need to do a wave reduction to get the maximum size to know how
- // much to increment the uniform stack pointer.
- SDValue Size = Op.getOperand(1);
- if (isa<ConstantSDNode>(Size))
- return lowerDYNAMIC_STACKALLOCImpl(Op, DAG); // Use "generic" expansion.
-
- return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG);
+ return lowerDYNAMIC_STACKALLOCImpl(Op, BaseAddr, Chain, SPReg,
+ DAG); // Use "generic" expansion.
}
SDValue SITargetLowering::LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 631f26542bbe6d..9b8871a8aa73f4 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -421,7 +421,9 @@ class SITargetLowering final : public AMDGPUTargetLowering {
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
- SDValue lowerDYNAMIC_STACKALLOCImpl(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerDYNAMIC_STACKALLOCImpl(SDValue Op, SDValue BaseAddr,
+ SDValue Chain, Register SPReg,
+ SelectionDAG &DAG) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/dynamic-alloca-divergent.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/dynamic-alloca-divergent.ll
index aefcad491073fc..0afa489a41bdca 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/dynamic-alloca-divergent.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/dynamic-alloca-divergent.ll
@@ -1,10 +1,38 @@
-; RUN: not llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -global-isel-abort=2 -pass-remarks-missed="gisel.*" -verify-machineinstrs=0 -o /dev/null 2>&1 %s | FileCheck -check-prefix=ERR %s
-
-; ERR: remark: <unknown>:0:0: cannot select: %{{[0-9]+}}:sreg_32(p5) = G_DYN_STACKALLOC %{{[0-9]+}}:vgpr(s32), 1 (in function: kernel_dynamic_stackalloc_vgpr_align4)
-; ERR-NEXT: warning: Instruction selection used fallback path for kernel_dynamic_stackalloc_vgpr_align4
-; ERR-NEXT: error: <unknown>:0:0: in function kernel_dynamic_stackalloc_vgpr_align4 void (ptr addrspace(1)): unsupported dynamic alloca
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=DYN %s
define amdgpu_kernel void @kernel_dynamic_stackalloc_vgpr_align4(ptr addrspace(1) %ptr) {
+; DYN-LABEL: kernel_dynamic_stackalloc_vgpr_align4:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
+; DYN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; DYN-NEXT: s_add_u32 s0, s0, s17
+; DYN-NEXT: s_addc_u32 s1, s1, 0
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_waitcnt lgkmcnt(0)
+; DYN-NEXT: global_load_dword v0, v0, s[4:5]
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s33, 0
+; DYN-NEXT: s_movk_i32 s32, 0x400
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: .LBB0_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB0_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_mov_b32 s4, s32
+; DYN-NEXT: s_lshl_b32 s5, s6, 6
+; DYN-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN-NEXT: v_mov_b32_e32 v1, s4
+; DYN-NEXT: s_add_u32 s32, s4, s5
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
%n = load i32, ptr addrspace(1) %gep
@@ -13,11 +41,38 @@ define amdgpu_kernel void @kernel_dynamic_stackalloc_vgpr_align4(ptr addrspace(1
ret void
}
-; ERR: remark: <unknown>:0:0: cannot select: %{{[0-9]+}}:sreg_32(p5) = G_DYN_STACKALLOC %{{[0-9]+}}:vgpr(s32), 1 (in function: kernel_dynamic_stackalloc_vgpr_default_align)
-; ERR-NEXT: warning: Instruction selection used fallback path for kernel_dynamic_stackalloc_vgpr_default_align
-; ERR-NEXT: error: <unknown>:0:0: in function kernel_dynamic_stackalloc_vgpr_default_align void (ptr addrspace(1)): unsupported dynamic alloca
-
define amdgpu_kernel void @kernel_dynamic_stackalloc_vgpr_default_align(ptr addrspace(1) %ptr) {
+; DYN-LABEL: kernel_dynamic_stackalloc_vgpr_default_align:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
+; DYN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; DYN-NEXT: s_add_u32 s0, s0, s17
+; DYN-NEXT: s_addc_u32 s1, s1, 0
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_waitcnt lgkmcnt(0)
+; DYN-NEXT: global_load_dword v0, v0, s[4:5]
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s33, 0
+; DYN-NEXT: s_movk_i32 s32, 0x400
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: .LBB1_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB1_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_mov_b32 s4, s32
+; DYN-NEXT: s_lshl_b32 s5, s6, 6
+; DYN-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN-NEXT: v_mov_b32_e32 v1, s4
+; DYN-NEXT: s_add_u32 s32, s4, s5
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
%n = load i32, ptr addrspace(1) %gep
@@ -25,11 +80,40 @@ define amdgpu_kernel void @kernel_dynamic_stackalloc_vgpr_default_align(ptr addr
store volatile i32 123, ptr addrspace(5) %alloca
ret void
}
-; ERR: remark: <unknown>:0:0: cannot select: %{{[0-9]+}}:sreg_32(p5) = G_DYN_STACKALLOC %{{[0-9]+}}:vgpr(s32), 64 (in function: kernel_dynamic_stackalloc_vgpr_align64)
-; ERR-NEXT: warning: Instruction selection used fallback path for kernel_dynamic_stackalloc_vgpr_align64
-; ERR-NEXT: error: <unknown>:0:0: in function kernel_dynamic_stackalloc_vgpr_align64 void (ptr addrspace(1)): unsupported dynamic alloca
define amdgpu_kernel void @kernel_dynamic_stackalloc_vgpr_align64(ptr addrspace(1) %ptr) {
+; DYN-LABEL: kernel_dynamic_stackalloc_vgpr_align64:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
+; DYN-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; DYN-NEXT: s_add_u32 s0, s0, s17
+; DYN-NEXT: s_addc_u32 s1, s1, 0
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_waitcnt lgkmcnt(0)
+; DYN-NEXT: global_load_dword v0, v0, s[4:5]
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s33, 0
+; DYN-NEXT: s_movk_i32 s32, 0x1000
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB2_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_add_u32 s5, s32, 0xfff
+; DYN-NEXT: s_and_b32 s5, s5, 0xfffff000
+; DYN-NEXT: s_lshl_b32 s4, s6, 6
+; DYN-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN-NEXT: v_mov_b32_e32 v1, s5
+; DYN-NEXT: s_add_u32 s32, s5, s4
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_endpgm
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
%n = load i32, ptr addrspace(1) %gep
@@ -38,35 +122,183 @@ define amdgpu_kernel void @kernel_dynamic_stackalloc_vgpr_align64(ptr addrspace(
ret void
}
-; ERR: remark: <unknown>:0:0: cannot select: %{{[0-9]+}}:sreg_32(p5) = G_DYN_STACKALLOC %{{[0-9]+}}:vgpr(s32), 1 (in function: func_dynamic_stackalloc_vgpr_align4)
-; ERR-NEXT: warning: Instruction selection used fallback path for func_dynamic_stackalloc_vgpr_align4
-; ERR-NEXT: error: <unknown>:0:0: in function func_dynamic_stackalloc_vgpr_align4 void (i32): unsupported dynamic alloca
-
define void @func_dynamic_stackalloc_vgpr_align4(i32 %n) {
+; DYN-LABEL: func_dynamic_stackalloc_vgpr_align4:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: s_mov_b32 s9, s33
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_mov_b32 s33, s32
+; DYN-NEXT: s_addk_i32 s32, 0x400
+; DYN-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB3_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_mov_b32 s4, s32
+; DYN-NEXT: s_lshl_b32 s5, s6, 6
+; DYN-NEXT: s_add_u32 s32, s4, s5
+; DYN-NEXT: v_mov_b32_e32 v0, 0x1c8
+; DYN-NEXT: v_mov_b32_e32 v1, s4
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_addk_i32 s32, 0xfc00
+; DYN-NEXT: s_mov_b32 s33, s9
+; DYN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, i32 %n, align 4, addrspace(5)
store volatile i32 456, ptr addrspace(5) %alloca
ret void
}
-; ERR: remark: <unknown>:0:0: cannot select: %{{[0-9]+}}:sreg_32(p5) = G_DYN_STACKALLOC %{{[0-9]+}}:vgpr(s32), 1 (in function: func_dynamic_stackalloc_vgpr_default_align)
-; ERR-NEXT: warning: Instruction selection used fallback path for func_dynamic_stackalloc_vgpr_default_align
-; ERR-NEXT: error: <unknown>:0:0: in function func_dynamic_stackalloc_vgpr_default_align void (i32): unsupported dynamic alloca
-
define void @func_dynamic_stackalloc_vgpr_default_align(i32 %n) {
+; DYN-LABEL: func_dynamic_stackalloc_vgpr_default_align:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: s_mov_b32 s9, s33
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_mov_b32 s33, s32
+; DYN-NEXT: s_addk_i32 s32, 0x400
+; DYN-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB4_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_mov_b32 s4, s32
+; DYN-NEXT: s_lshl_b32 s5, s6, 6
+; DYN-NEXT: s_add_u32 s32, s4, s5
+; DYN-NEXT: v_mov_b32_e32 v0, 0x1c8
+; DYN-NEXT: v_mov_b32_e32 v1, s4
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_addk_i32 s32, 0xfc00
+; DYN-NEXT: s_mov_b32 s33, s9
+; DYN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, i32 %n, addrspace(5)
store volatile i32 456, ptr addrspace(5) %alloca
ret void
}
-; ERR: remark: <unknown>:0:0: cannot select: %{{[0-9]+}}:sreg_32(p5) = G_DYN_STACKALLOC %{{[0-9]+}}:vgpr(s32), 64 (in function: func_dynamic_stackalloc_vgpr_align64)
-; ERR-NEXT: warning: Instruction selection used fallback path for func_dynamic_stackalloc_vgpr_align64
-; ERR-NEXT: error: <unknown>:0:0: in function func_dynamic_stackalloc_vgpr_align64 void (i32): unsupported dynamic alloca
define void @func_dynamic_stackalloc_vgpr_align64(i32 %n) {
+; DYN-LABEL: func_dynamic_stackalloc_vgpr_align64:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: s_mov_b32 s9, s33
+; DYN-NEXT: s_add_i32 s33, s32, 0xfc0
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_and_b32 s33, s33, 0xfffff000
+; DYN-NEXT: s_addk_i32 s32, 0x2000
+; DYN-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB5_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_add_u32 s5, s32, 0xfff
+; DYN-NEXT: s_lshl_b32 s4, s6, 6
+; DYN-NEXT: s_and_b32 s5, s5, 0xfffff000
+; DYN-NEXT: s_add_u32 s32, s5, s4
+; DYN-NEXT: v_mov_b32_e32 v0, 0x1c8
+; DYN-NEXT: v_mov_b32_e32 v1, s5
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_addk_i32 s32, 0xe000
+; DYN-NEXT: s_mov_b32 s33, s9
+; DYN-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, i32 %n, align 64, addrspace(5)
store volatile i32 456, ptr addrspace(5) %alloca
ret void
}
+define void @func_dynamic_stackalloc_non_standard_size_i65(i65 %n) {
+; DYN-LABEL: func_dynamic_stackalloc_non_standard_size_i65:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: s_mov_b32 s9, s33
+; DYN-NEXT: s_add_i32 s33, s32, 0xfc0
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_and_b32 s33, s33, 0xfffff000
+; DYN-NEXT: s_addk_i32 s32, 0x2000
+; DYN-NEXT: .LBB6_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB6_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_add_u32 s5, s32, 0xfff
+; DYN-NEXT: s_lshl_b32 s4, s6, 6
+; DYN-NEXT: s_and_b32 s5, s5, 0xfffff000
+; DYN-NEXT: s_add_u32 s32, s5, s4
+; DYN-NEXT: v_mov_b32_e32 v0, 0x1c8
+; DYN-NEXT: v_mov_b32_e32 v1, s5
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_addk_i32 s32, 0xe000
+; DYN-NEXT: s_mov_b32 s33, s9
+; DYN-NEXT: s_setpc_b64 s[30:31]
+ %alloca = alloca i32, i65 %n, align 64, addrspace(5)
+ store volatile i32 456, ptr addrspace(5) %alloca
+ ret void
+}
+
+define void @func_dynamic_stackalloc_non_standard_size_i23(i23 %n) {
+; DYN-LABEL: func_dynamic_stackalloc_non_standard_size_i23:
+; DYN: ; %bb.0:
+; DYN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; DYN-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN-NEXT: s_mov_b32 s9, s33
+; DYN-NEXT: s_add_i32 s33, s32, 0xfc0
+; DYN-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN-NEXT: s_mov_b64 s[4:5], exec
+; DYN-NEXT: s_mov_b32 s6, 0
+; DYN-NEXT: s_and_b32 s33, s33, 0xfffff000
+; DYN-NEXT: s_addk_i32 s32, 0x2000
+; DYN-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
+; DYN-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN-NEXT: v_readlane_b32 s8, v0, s7
+; DYN-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN-NEXT: s_max_u32 s6, s6, s8
+; DYN-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN-NEXT: s_cbranch_scc1 .LBB7_1
+; DYN-NEXT: ; %bb.2:
+; DYN-NEXT: s_add_u32 s5, s32, 0xfff
+; DYN-NEXT: s_lshl_b32 s4, s6, 6
+; DYN-NEXT: s_and_b32 s5, s5, 0xfffff000
+; DYN-NEXT: s_add_u32 s32, s5, s4
+; DYN-NEXT: v_mov_b32_e32 v0, 0x1c8
+; DYN-NEXT: v_mov_b32_e32 v1, s5
+; DYN-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN-NEXT: s_waitcnt vmcnt(0)
+; DYN-NEXT: s_addk_i32 s32, 0xe000
+; DYN-NEXT: s_mov_b32 s33, s9
+; DYN-NEXT: s_setpc_b64 s[30:31]
+ %alloca = alloca i32, i23 %n, align 64, addrspace(5)
+ store volatile i32 456, ptr addrspace(5) %alloca
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #0
attributes #0 = { nounwind readnone speculatable }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-dyn-stackalloc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-dyn-stackalloc.mir
index 5378ce2d1efaad..10517a49e697c5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-dyn-stackalloc.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-dyn-stackalloc.mir
@@ -491,3 +491,132 @@ body: |
%1:_(p5) = G_DYN_STACKALLOC %0, 32
S_ENDPGM 0, implicit %1
...
+
+---
+name: test_dyn_stackalloc_vgpr_align4
+legalized: true
+frameInfo:
+ maxAlignment: 4
+stack:
+ - { id: 0, type: variable-sized, alignment: 4 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; WAVE64-LABEL: name: test_dyn_stackalloc_vgpr_align4
+ ; WAVE64: liveins: $vgpr0
+ ; WAVE64-NEXT: {{ $}}
+ ; WAVE64-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE64-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sgpr(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.wave.reduce.umax), [[COPY]](s32), 0
+ ; WAVE64-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 6
+ ; WAVE64-NEXT: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[INTRINSIC_CONVERGENT]], [[C]](s32)
+ ; WAVE64-NEXT: [[COPY1:%[0-9]+]]:sgpr(p5) = COPY $sp_reg
+ ; WAVE64-NEXT: [[COPY2:%[0-9]+]]:sgpr(p5) = COPY [[COPY1]](p5)
+ ; WAVE64-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY2]], [[SHL]](s32)
+ ; WAVE64-NEXT: $sp_reg = COPY [[PTR_ADD]](p5)
+ ; WAVE64-NEXT: S_ENDPGM 0, implicit [[COPY2]](p5)
+ ;
+ ; WAVE32-LABEL: name: test_dyn_stackalloc_vgpr_align4
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sgpr(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.wave.reduce.umax), [[COPY]](s32), 0
+ ; WAVE32-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 5
+ ; WAVE32-NEXT: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[INTRINSIC_CONVERGENT]], [[C]](s32)
+ ; WAVE32-NEXT: [[COPY1:%[0-9]+]]:sgpr(p5) = COPY $sp_reg
+ ; WAVE32-NEXT: [[COPY2:%[0-9]+]]:sgpr(p5) = COPY [[COPY1]](p5)
+ ; WAVE32-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY2]], [[SHL]](s32)
+ ; WAVE32-NEXT: $sp_reg = COPY [[PTR_ADD]](p5)
+ ; WAVE32-NEXT: S_ENDPGM 0, implicit [[COPY2]](p5)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(p5) = G_DYN_STACKALLOC %0, 4
+ S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_dyn_stackalloc_vgpr_align16
+legalized: true
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, type: variable-sized, alignment: 16 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; WAVE64-LABEL: name: test_dyn_stackalloc_vgpr_align16
+ ; WAVE64: liveins: $vgpr0
+ ; WAVE64-NEXT: {{ $}}
+ ; WAVE64-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE64-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sgpr(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.wave.reduce.umax), [[COPY]](s32), 0
+ ; WAVE64-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 6
+ ; WAVE64-NEXT: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[INTRINSIC_CONVERGENT]], [[C]](s32)
+ ; WAVE64-NEXT: [[COPY1:%[0-9]+]]:sgpr(p5) = COPY $sp_reg
+ ; WAVE64-NEXT: [[COPY2:%[0-9]+]]:sgpr(p5) = COPY [[COPY1]](p5)
+ ; WAVE64-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY2]], [[SHL]](s32)
+ ; WAVE64-NEXT: $sp_reg = COPY [[PTR_ADD]](p5)
+ ; WAVE64-NEXT: S_ENDPGM 0, implicit [[COPY2]](p5)
+ ;
+ ; WAVE32-LABEL: name: test_dyn_stackalloc_vgpr_align16
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sgpr(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.wave.reduce.umax), [[COPY]](s32), 0
+ ; WAVE32-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 5
+ ; WAVE32-NEXT: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[INTRINSIC_CONVERGENT]], [[C]](s32)
+ ; WAVE32-NEXT: [[COPY1:%[0-9]+]]:sgpr(p5) = COPY $sp_reg
+ ; WAVE32-NEXT: [[COPY2:%[0-9]+]]:sgpr(p5) = COPY [[COPY1]](p5)
+ ; WAVE32-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY2]], [[SHL]](s32)
+ ; WAVE32-NEXT: $sp_reg = COPY [[PTR_ADD]](p5)
+ ; WAVE32-NEXT: S_ENDPGM 0, implicit [[COPY2]](p5)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(p5) = G_DYN_STACKALLOC %0, 16
+ S_ENDPGM 0, implicit %1
+...
+
+---
+name: test_dyn_stackalloc_vgpr_align64
+legalized: true
+frameInfo:
+ maxAlignment: 64
+stack:
+ - { id: 0, type: variable-sized, alignment: 64 }
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; WAVE64-LABEL: name: test_dyn_stackalloc_vgpr_align64
+ ; WAVE64: liveins: $vgpr0
+ ; WAVE64-NEXT: {{ $}}
+ ; WAVE64-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE64-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sgpr(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.wave.reduce.umax), [[COPY]](s32), 0
+ ; WAVE64-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 6
+ ; WAVE64-NEXT: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[INTRINSIC_CONVERGENT]], [[C]](s32)
+ ; WAVE64-NEXT: [[COPY1:%[0-9]+]]:sgpr(p5) = COPY $sp_reg
+ ; WAVE64-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4095
+ ; WAVE64-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+ ; WAVE64-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -4096
+ ; WAVE64-NEXT: [[PTRMASK:%[0-9]+]]:sgpr(p5) = G_PTRMASK [[PTR_ADD]], [[C2]](s32)
+ ; WAVE64-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[PTRMASK]], [[SHL]](s32)
+ ; WAVE64-NEXT: $sp_reg = COPY [[PTR_ADD1]](p5)
+ ; WAVE64-NEXT: S_ENDPGM 0, implicit [[PTRMASK]](p5)
+ ;
+ ; WAVE32-LABEL: name: test_dyn_stackalloc_vgpr_align64
+ ; WAVE32: liveins: $vgpr0
+ ; WAVE32-NEXT: {{ $}}
+ ; WAVE32-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; WAVE32-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sgpr(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.wave.reduce.umax), [[COPY]](s32), 0
+ ; WAVE32-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 5
+ ; WAVE32-NEXT: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[INTRINSIC_CONVERGENT]], [[C]](s32)
+ ; WAVE32-NEXT: [[COPY1:%[0-9]+]]:sgpr(p5) = COPY $sp_reg
+ ; WAVE32-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2047
+ ; WAVE32-NEXT: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+ ; WAVE32-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -2048
+ ; WAVE32-NEXT: [[PTRMASK:%[0-9]+]]:sgpr(p5) = G_PTRMASK [[PTR_ADD]], [[C2]](s32)
+ ; WAVE32-NEXT: [[PTR_ADD1:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[PTRMASK]], [[SHL]](s32)
+ ; WAVE32-NEXT: $sp_reg = COPY [[PTR_ADD1]](p5)
+ ; WAVE32-NEXT: S_ENDPGM 0, implicit [[PTRMASK]](p5)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(p5) = G_DYN_STACKALLOC %0, 64
+ S_ENDPGM 0, implicit %1
+...
diff --git a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
index 73aa87e5c55d20..fbf7938ff36af0 100644
--- a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
@@ -1,64 +1,465 @@
-; RUN: not llc -mtriple=amdgcn-- -mcpu=tahiti -mattr=+promote-alloca -verify-machineinstrs < %s 2>&1 | FileCheck %s
-; RUN: not llc -mtriple=amdgcn-- -mcpu=tahiti -mattr=-promote-alloca -verify-machineinstrs < %s 2>&1 | FileCheck %s
-; RUN: not llc -mtriple=r600-- -mcpu=cypress < %s 2>&1 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 -mattr=+promote-alloca -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=DYN_GFX900 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-promote-alloca -verify-machineinstrs < %s 2>&1 | FileCheck -check-prefix=DYN_GFX1100 %s
target datalayout = "A5"
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_uniform(i32 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_uniform:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_load_dword s0, s[4:5], 0x24
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX900-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x400
+; DYN_GFX900-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX900-NEXT: s_mov_b32 s1, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 6
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s1
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_endpgm
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_uniform:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_load_b32 s0, s[4:5], 0x24
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN_GFX1100-NEXT: s_mov_b32 s32, 16
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v0, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX1100-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 5
+; DYN_GFX1100-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX1100-NEXT: s_endpgm
%alloca = alloca i32, i32 %n, addrspace(5)
store volatile i32 123, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_uniform_over_aligned(i32 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_uniform_over_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_load_dword s0, s[4:5], 0x24
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x2000
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX900-NEXT: s_add_i32 s1, s32, 0x1fff
+; DYN_GFX900-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX900-NEXT: s_and_b32 s1, s1, 0xffffe000
+; DYN_GFX900-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 10
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 6
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, s1
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX900-NEXT: buffer_store_dword v0, v1, s[12:15], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_endpgm
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_uniform_over_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_load_b32 s0, s[4:5], 0x24
+; DYN_GFX1100-NEXT: s_movk_i32 s32, 0x80
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v0, 10
+; DYN_GFX1100-NEXT: s_add_i32 s1, s32, 0xfff
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_and_b32 s1, s1, 0xfffff000
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v0, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX1100-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 5
+; DYN_GFX1100-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX1100-NEXT: s_endpgm
%alloca = alloca i32, i32 %n, align 128, addrspace(5)
store volatile i32 10, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_uniform_under_aligned(i32 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_uniform_under_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_load_dword s0, s[4:5], 0x24
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX900-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x400
+; DYN_GFX900-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX900-NEXT: s_mov_b32 s1, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 22
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 6
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s1
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_endpgm
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_uniform_under_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_load_b32 s0, s[4:5], 0x24
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v0, 22
+; DYN_GFX1100-NEXT: s_mov_b32 s32, 16
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v0, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX1100-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 5
+; DYN_GFX1100-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX1100-NEXT: s_endpgm
%alloca = alloca i32, i32 %n, align 2, addrspace(5)
store volatile i32 22, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_divergent() {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_divergent:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[0:1], exec
+; DYN_GFX900-NEXT: s_mov_b32 s2, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s3, s[0:1]
+; DYN_GFX900-NEXT: v_readlane_b32 s4, v0, s3
+; DYN_GFX900-NEXT: s_bitset0_b64 s[0:1], s3
+; DYN_GFX900-NEXT: s_max_u32 s2, s2, s4
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[0:1], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB3_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s0, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s0
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s2, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s0
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_endpgm
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_divergent:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s32, 16
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB3_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x7b
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_endpgm
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%alloca = alloca float, i32 %idx, addrspace(5)
store volatile i32 123, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_divergent_over_aligned() {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_divergent_over_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[0:1], exec
+; DYN_GFX900-NEXT: s_mov_b32 s2, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x2000
+; DYN_GFX900-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s3, s[0:1]
+; DYN_GFX900-NEXT: v_readlane_b32 s4, v0, s3
+; DYN_GFX900-NEXT: s_bitset0_b64 s[0:1], s3
+; DYN_GFX900-NEXT: s_max_u32 s2, s2, s4
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[0:1], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB4_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_add_i32 s0, s32, 0x1fff
+; DYN_GFX900-NEXT: s_and_b32 s0, s0, 0xffffe000
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s0
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, s2, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, 0x1bc
+; DYN_GFX900-NEXT: buffer_store_dword v1, v0, s[12:15], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_endpgm
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_divergent_over_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; DYN_GFX1100-NEXT: s_movk_i32 s32, 0x80
+; DYN_GFX1100-NEXT: s_mov_b32 s2, exec_lo
+; DYN_GFX1100-NEXT: s_add_i32 s0, s32, 0xfff
+; DYN_GFX1100-NEXT: s_mov_b32 s1, 0
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_and_b32 s0, s0, 0xfffff000
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s3, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s4, v0, s3
+; DYN_GFX1100-NEXT: s_bitset0_b32 s2, s3
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s1, s1, s4
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s2, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB4_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s1, 5, s0
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x1bc
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s0 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: s_endpgm
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%alloca = alloca i32, i32 %idx, align 128, addrspace(5)
store volatile i32 444, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_divergent_under_aligned() {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_divergent_under_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: v_lshlrev_b32_e32 v0, 4, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[0:1], exec
+; DYN_GFX900-NEXT: s_mov_b32 s2, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s3, s[0:1]
+; DYN_GFX900-NEXT: v_readlane_b32 s4, v0, s3
+; DYN_GFX900-NEXT: s_bitset0_b64 s[0:1], s3
+; DYN_GFX900-NEXT: s_max_u32 s2, s2, s4
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[0:1], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB5_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s0, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s0
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s2, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x29a
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s0
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_endpgm
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_divergent_under_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s32, 16
+; DYN_GFX1100-NEXT: v_lshlrev_b32_e32 v0, 4, v0
+; DYN_GFX1100-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB5_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x29a
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_endpgm
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%alloca = alloca i128, i32 %idx, align 2, addrspace(5)
store volatile i32 666, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_multiple_allocas(i32 %n, i32 %m) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_multiple_allocas:
+; DYN_GFX900: ; %bb.0: ; %entry
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX900-NEXT: s_cmp_lg_u32 s0, 0
+; DYN_GFX900-NEXT: s_mov_b32 s4, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x2000
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB6_4
+; DYN_GFX900-NEXT: ; %bb.1: ; %bb.0
+; DYN_GFX900-NEXT: s_lshl_b32 s1, s1, 2
+; DYN_GFX900-NEXT: s_add_i32 s1, s1, 15
+; DYN_GFX900-NEXT: s_add_i32 s2, s32, 0xfff
+; DYN_GFX900-NEXT: s_and_b32 s1, s1, -16
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_and_b32 s5, s2, 0xfffff000
+; DYN_GFX900-NEXT: s_lshl_b32 s1, s1, 6
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[2:3], exec
+; DYN_GFX900-NEXT: s_add_i32 s32, s5, s1
+; DYN_GFX900-NEXT: .LBB6_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s1, s[2:3]
+; DYN_GFX900-NEXT: v_readlane_b32 s6, v0, s1
+; DYN_GFX900-NEXT: s_bitset0_b64 s[2:3], s1
+; DYN_GFX900-NEXT: s_max_u32 s4, s4, s6
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[2:3], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB6_2
+; DYN_GFX900-NEXT: ; %bb.3:
+; DYN_GFX900-NEXT: s_mov_b32 s1, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s1
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s4, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 3
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, s5
+; DYN_GFX900-NEXT: buffer_store_dword v0, v1, s[12:15], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 4
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s1
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: .LBB6_4: ; %bb.1
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX900-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX900-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 1
+; DYN_GFX900-NEXT: s_lshl_b32 s0, s0, 6
+; DYN_GFX900-NEXT: s_mov_b32 s1, s32
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s33
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 2
+; DYN_GFX900-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s1
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_endpgm
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_multiple_allocas:
+; DYN_GFX1100: ; %bb.0: ; %entry
+; DYN_GFX1100-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; DYN_GFX1100-NEXT: s_mov_b32 s2, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_movk_i32 s32, 0x80
+; DYN_GFX1100-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s0, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB6_4
+; DYN_GFX1100-NEXT: ; %bb.1: ; %bb.0
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; DYN_GFX1100-NEXT: s_lshl_b32 s1, s1, 2
+; DYN_GFX1100-NEXT: s_add_i32 s3, s32, 0x7ff
+; DYN_GFX1100-NEXT: s_add_i32 s1, s1, 15
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_and_b32 s4, s1, -16
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_and_b32 s1, s3, 0xfffff800
+; DYN_GFX1100-NEXT: s_lshl_b32 s3, s4, 5
+; DYN_GFX1100-NEXT: s_add_i32 s32, s1, s3
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: s_mov_b32 s3, exec_lo
+; DYN_GFX1100-NEXT: .LBB6_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s4, s3
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s5, v0, s4
+; DYN_GFX1100-NEXT: s_bitset0_b32 s3, s4
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s2, s2, s5
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s3, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB6_2
+; DYN_GFX1100-NEXT: ; %bb.3:
+; DYN_GFX1100-NEXT: s_mov_b32 s3, s32
+; DYN_GFX1100-NEXT: v_dual_mov_b32 v1, 3 :: v_dual_mov_b32 v2, 4
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s2, 5, s3
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v2, s3 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: .LBB6_4: ; %bb.1
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 2
+; DYN_GFX1100-NEXT: v_dual_mov_b32 v0, 1 :: v_dual_mov_b32 v1, 2
+; DYN_GFX1100-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v0, s33 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 5
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX1100-NEXT: s_endpgm
entry:
%cond = icmp eq i32 %n, 0
%alloca1 = alloca i32, i32 8, addrspace(5)
@@ -77,10 +478,106 @@ bb.1:
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i32 %m) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_kernel_control_flow:
+; DYN_GFX900: ; %bb.0: ; %entry
+; DYN_GFX900-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; DYN_GFX900-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; DYN_GFX900-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; DYN_GFX900-NEXT: s_mov_b32 s14, -1
+; DYN_GFX900-NEXT: s_mov_b32 s15, 0xe00000
+; DYN_GFX900-NEXT: s_add_u32 s12, s12, s11
+; DYN_GFX900-NEXT: s_addc_u32 s13, s13, 0
+; DYN_GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX900-NEXT: s_cmp_lg_u32 s0, 0
+; DYN_GFX900-NEXT: s_mov_b32 s0, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, 0
+; DYN_GFX900-NEXT: s_movk_i32 s32, 0x1000
+; DYN_GFX900-NEXT: s_cbranch_scc0 .LBB7_6
+; DYN_GFX900-NEXT: ; %bb.1: ; %bb.1
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[2:3], exec
+; DYN_GFX900-NEXT: .LBB7_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s4, s[2:3]
+; DYN_GFX900-NEXT: v_readlane_b32 s5, v0, s4
+; DYN_GFX900-NEXT: s_bitset0_b64 s[2:3], s4
+; DYN_GFX900-NEXT: s_max_u32 s0, s0, s5
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[2:3], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB7_2
+; DYN_GFX900-NEXT: ; %bb.3:
+; DYN_GFX900-NEXT: s_mov_b32 s2, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s2
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s0, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 1
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[12:15], s2
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_cbranch_execnz .LBB7_5
+; DYN_GFX900-NEXT: .LBB7_4: ; %bb.0
+; DYN_GFX900-NEXT: s_lshl_b32 s1, s1, 2
+; DYN_GFX900-NEXT: s_add_i32 s0, s32, 0xfff
+; DYN_GFX900-NEXT: s_add_i32 s1, s1, 15
+; DYN_GFX900-NEXT: s_and_b32 s0, s0, 0xfffff000
+; DYN_GFX900-NEXT: s_and_b32 s1, s1, -16
+; DYN_GFX900-NEXT: s_lshl_b32 s1, s1, 6
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 2
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, s0
+; DYN_GFX900-NEXT: s_add_i32 s32, s0, s1
+; DYN_GFX900-NEXT: buffer_store_dword v0, v1, s[12:15], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: .LBB7_5: ; %bb.2
+; DYN_GFX900-NEXT: s_endpgm
+; DYN_GFX900-NEXT: .LBB7_6:
+; DYN_GFX900-NEXT: s_branch .LBB7_4
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_kernel_control_flow:
+; DYN_GFX1100: ; %bb.0: ; %entry
+; DYN_GFX1100-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; DYN_GFX1100-NEXT: s_mov_b32 s33, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s32, 64
+; DYN_GFX1100-NEXT: s_waitcnt lgkmcnt(0)
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc0 .LBB7_6
+; DYN_GFX1100-NEXT: ; %bb.1: ; %bb.1
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; DYN_GFX1100-NEXT: s_mov_b32 s2, exec_lo
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: .LBB7_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s3, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s4, v0, s3
+; DYN_GFX1100-NEXT: s_bitset0_b32 s2, s3
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s4
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s2, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB7_2
+; DYN_GFX1100-NEXT: ; %bb.3:
+; DYN_GFX1100-NEXT: s_mov_b32 s2, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 1
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s2
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s2 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_cbranch_execnz .LBB7_5
+; DYN_GFX1100-NEXT: .LBB7_4: ; %bb.0
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s1, 2
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v0, 2
+; DYN_GFX1100-NEXT: s_add_i32 s0, s0, 15
+; DYN_GFX1100-NEXT: s_add_i32 s1, s32, 0x7ff
+; DYN_GFX1100-NEXT: s_and_b32 s0, s0, -16
+; DYN_GFX1100-NEXT: s_and_b32 s1, s1, 0xfffff800
+; DYN_GFX1100-NEXT: s_lshl_b32 s0, s0, 5
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v0, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: s_add_i32 s32, s1, s0
+; DYN_GFX1100-NEXT: .LBB7_5: ; %bb.2
+; DYN_GFX1100-NEXT: s_endpgm
+; DYN_GFX1100-NEXT: .LBB7_6:
+; DYN_GFX1100-NEXT: s_branch .LBB7_4
entry:
%cond = icmp eq i32 %n, 0
br i1 %cond, label %bb.0, label %bb.1
@@ -97,62 +594,581 @@ bb.2:
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_uniform(i32 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_uniform:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_mov_b32 s9, s33
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s6, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, s32
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s8, v0, s7
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN_GFX900-NEXT: s_max_u32 s6, s6, s8
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB8_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s4, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s6, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s4
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xfc00
+; DYN_GFX900-NEXT: s_mov_b32 s33, s9
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_uniform:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s33
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s32
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, 16
+; DYN_GFX1100-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB8_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x7b
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s4
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, -16
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, i32 %n, addrspace(5)
store volatile i32 123, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_uniform_over_aligned(i32 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_uniform_over_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_mov_b32 s9, s33
+; DYN_GFX900-NEXT: s_add_i32 s33, s32, 0x1fc0
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s6, 0
+; DYN_GFX900-NEXT: s_and_b32 s33, s33, 0xffffe000
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x4000
+; DYN_GFX900-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s8, v0, s7
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN_GFX900-NEXT: s_max_u32 s6, s6, s8
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB9_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_add_i32 s4, s32, 0x1fff
+; DYN_GFX900-NEXT: s_and_b32 s4, s4, 0xffffe000
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, s6, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, 10
+; DYN_GFX900-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xc000
+; DYN_GFX900-NEXT: s_mov_b32 s33, s9
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_uniform_over_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s33
+; DYN_GFX1100-NEXT: s_add_i32 s33, s32, 0x7f
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX1100-NEXT: s_and_b32 s33, s33, 0xffffff80
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0x100
+; DYN_GFX1100-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB9_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_add_i32 s1, s32, 0xfff
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 10
+; DYN_GFX1100-NEXT: s_and_b32 s1, s1, 0xfffff000
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s4
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0xff00
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, i32 %n, align 128, addrspace(5)
store volatile i32 10, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_uniform_under_aligned(i32 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_uniform_under_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_mov_b32 s9, s33
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s6, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, s32
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s8, v0, s7
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN_GFX900-NEXT: s_max_u32 s6, s6, s8
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB10_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s4, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s6, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 22
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s4
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xfc00
+; DYN_GFX900-NEXT: s_mov_b32 s33, s9
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_uniform_under_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s33
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s32
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, 16
+; DYN_GFX1100-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB10_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 22
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s4
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, -16
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i32, i32 %n, align 2, addrspace(5)
store volatile i32 22, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_divergent() {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_divergent:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_mov_b32 s9, s33
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s6, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, s32
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB11_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s8, v0, s7
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN_GFX900-NEXT: s_max_u32 s6, s6, s8
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB11_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s4, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s6, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x7b
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s4
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xfc00
+; DYN_GFX900-NEXT: s_mov_b32 s33, s9
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_divergent:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s33
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s32
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, 16
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: .LBB11_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB11_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x7b
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s4
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, -16
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%alloca = alloca i32, i32 %idx, addrspace(5)
store volatile i32 123, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_divergent_over_aligned() {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_divergent_over_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: s_mov_b32 s10, s33
+; DYN_GFX900-NEXT: s_add_i32 s33, s32, 0x1fc0
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x4000
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX900-NEXT: s_add_i32 s4, s32, 0x1fff
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_and_b32 s6, s4, 0xffffe000
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s7, 0
+; DYN_GFX900-NEXT: s_and_b32 s33, s33, 0xffffe000
+; DYN_GFX900-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s8, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s9, v0, s8
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s8
+; DYN_GFX900-NEXT: s_max_u32 s7, s7, s9
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB12_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s6
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, s7, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, 0x1bc
+; DYN_GFX900-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xc000
+; DYN_GFX900-NEXT: s_mov_b32 s33, s10
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_divergent_over_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX1100-NEXT: s_mov_b32 s5, s33
+; DYN_GFX1100-NEXT: s_add_i32 s33, s32, 0x7f
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0x100
+; DYN_GFX1100-NEXT: s_mov_b32 s2, exec_lo
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_add_i32 s0, s32, 0xfff
+; DYN_GFX1100-NEXT: s_mov_b32 s1, 0
+; DYN_GFX1100-NEXT: s_and_b32 s0, s0, 0xfffff000
+; DYN_GFX1100-NEXT: s_and_b32 s33, s33, 0xffffff80
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s3, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s4, v0, s3
+; DYN_GFX1100-NEXT: s_bitset0_b32 s2, s3
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s1, s1, s4
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s2, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB12_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s1, 5, s0
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x1bc
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s5
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s0 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0xff00
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%alloca = alloca i32, i32 %idx, align 128, addrspace(5)
store volatile i32 444, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_divergent_under_aligned() {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_divergent_under_aligned:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_mov_b32 s9, s33
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s6, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, s32
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB13_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s8, v0, s7
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN_GFX900-NEXT: s_max_u32 s6, s6, s8
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB13_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s4, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s6, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x29a
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s4
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xfc00
+; DYN_GFX900-NEXT: s_mov_b32 s33, s9
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_divergent_under_aligned:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s33
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s32
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, 16
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: .LBB13_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB13_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x29a
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s4
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, -16
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
%idx = call i32 @llvm.amdgcn.workitem.id.x()
%alloca = alloca i32, i32 %idx, align 2, addrspace(5)
store volatile i32 666, ptr addrspace(5) %alloca
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_multiple_allocas(i32 %n, i32 %m) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_multiple_allocas:
+; DYN_GFX900: ; %bb.0: ; %entry
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: s_mov_b32 s13, s33
+; DYN_GFX900-NEXT: s_add_i32 s33, s32, 0xfc0
+; DYN_GFX900-NEXT: s_mov_b32 s8, 0
+; DYN_GFX900-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; DYN_GFX900-NEXT: s_and_b32 s33, s33, 0xfffff000
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x3000
+; DYN_GFX900-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; DYN_GFX900-NEXT: s_cbranch_execz .LBB14_6
+; DYN_GFX900-NEXT: ; %bb.1: ; %bb.0
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, v1, 2, 15
+; DYN_GFX900-NEXT: v_and_b32_e32 v1, -16, v1
+; DYN_GFX900-NEXT: s_mov_b64 s[6:7], exec
+; DYN_GFX900-NEXT: s_mov_b32 s10, 0
+; DYN_GFX900-NEXT: .LBB14_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s9, s[6:7]
+; DYN_GFX900-NEXT: v_readlane_b32 s11, v1, s9
+; DYN_GFX900-NEXT: s_bitset0_b64 s[6:7], s9
+; DYN_GFX900-NEXT: s_max_u32 s10, s10, s11
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[6:7], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB14_2
+; DYN_GFX900-NEXT: ; %bb.3:
+; DYN_GFX900-NEXT: s_add_i32 s6, s32, 0xfff
+; DYN_GFX900-NEXT: s_and_b32 s9, s6, 0xfffff000
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, s9
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, s10, 6, v1
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX900-NEXT: v_and_b32_e32 v1, 0x3ff, v31
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, v1, 2, 15
+; DYN_GFX900-NEXT: v_and_b32_e32 v1, 0x1ff0, v1
+; DYN_GFX900-NEXT: s_mov_b64 s[6:7], exec
+; DYN_GFX900-NEXT: s_mov_b32 s10, 0
+; DYN_GFX900-NEXT: .LBB14_4: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s11, s[6:7]
+; DYN_GFX900-NEXT: v_readlane_b32 s12, v1, s11
+; DYN_GFX900-NEXT: s_bitset0_b64 s[6:7], s11
+; DYN_GFX900-NEXT: s_max_u32 s10, s10, s12
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[6:7], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB14_4
+; DYN_GFX900-NEXT: ; %bb.5:
+; DYN_GFX900-NEXT: s_mov_b32 s6, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, s6
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, s10, 6, v1
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, 3
+; DYN_GFX900-NEXT: v_mov_b32_e32 v2, s9
+; DYN_GFX900-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, 4
+; DYN_GFX900-NEXT: buffer_store_dword v1, off, s[0:3], s6
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: .LBB14_6: ; %bb.1
+; DYN_GFX900-NEXT: s_or_b64 exec, exec, s[4:5]
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, 2
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: .LBB14_7: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s6, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s7, v0, s6
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s6
+; DYN_GFX900-NEXT: s_max_u32 s8, s8, s7
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB14_7
+; DYN_GFX900-NEXT: ; %bb.8:
+; DYN_GFX900-NEXT: s_mov_b32 s4, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s8, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 1
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s33
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: buffer_store_dword v1, off, s[0:3], s4
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xd000
+; DYN_GFX900-NEXT: s_mov_b32 s33, s13
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_multiple_allocas:
+; DYN_GFX1100: ; %bb.0: ; %entry
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: s_mov_b32 s7, s33
+; DYN_GFX1100-NEXT: s_add_i32 s33, s32, 63
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_and_not1_b32 s33, s33, 63
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0xc0
+; DYN_GFX1100-NEXT: v_cmpx_eq_u32_e32 0, v0
+; DYN_GFX1100-NEXT: s_cbranch_execz .LBB14_6
+; DYN_GFX1100-NEXT: ; %bb.1: ; %bb.0
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v1, v1, 2, 15
+; DYN_GFX1100-NEXT: s_mov_b32 s2, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s3, 0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v1, -16, v1
+; DYN_GFX1100-NEXT: .LBB14_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s4, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s5, v1, s4
+; DYN_GFX1100-NEXT: s_bitset0_b32 s2, s4
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s3, s3, s5
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s2, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB14_2
+; DYN_GFX1100-NEXT: ; %bb.3:
+; DYN_GFX1100-NEXT: v_and_b32_e32 v1, 0x3ff, v31
+; DYN_GFX1100-NEXT: s_add_i32 s2, s32, 0x7ff
+; DYN_GFX1100-NEXT: s_mov_b32 s4, exec_lo
+; DYN_GFX1100-NEXT: s_and_b32 s2, s2, 0xfffff800
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v2, s3, 5, s2
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v1, v1, 2, 15
+; DYN_GFX1100-NEXT: s_mov_b32 s3, 0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v1, 0x1ff0, v1
+; DYN_GFX1100-NEXT: .LBB14_4: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s5, s4
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s6, v1, s5
+; DYN_GFX1100-NEXT: s_bitset0_b32 s4, s5
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s3, s3, s6
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s4, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB14_4
+; DYN_GFX1100-NEXT: ; %bb.5:
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s32
+; DYN_GFX1100-NEXT: v_dual_mov_b32 v2, 3 :: v_dual_mov_b32 v3, 4
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v1, s3, 5, s4
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v2, s2 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v3, s4 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX1100-NEXT: .LBB14_6: ; %bb.1
+; DYN_GFX1100-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v1, v0, 2, 15
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v0, 2
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v1, -16, v1
+; DYN_GFX1100-NEXT: .LBB14_7: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v1, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB14_7
+; DYN_GFX1100-NEXT: ; %bb.8:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v2, 1
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v1, s0, 5, s1
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v2, s33 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v0, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s7
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0xff40
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
entry:
%cond = icmp eq i32 %n, 0
%alloca1 = alloca i32, i32 8, addrspace(5)
@@ -171,10 +1187,139 @@ bb.1:
ret void
}
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-; CHECK: in function test_dynamic_stackalloc{{.*}}: unsupported dynamic alloca
-
define void @test_dynamic_stackalloc_device_control_flow(i32 %n, i32 %m) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_control_flow:
+; DYN_GFX900: ; %bb.0: ; %entry
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: s_mov_b32 s11, s33
+; DYN_GFX900-NEXT: s_add_i32 s33, s32, 0xfc0
+; DYN_GFX900-NEXT: s_mov_b32 s8, 0
+; DYN_GFX900-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; DYN_GFX900-NEXT: s_and_b32 s33, s33, 0xfffff000
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x2000
+; DYN_GFX900-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; DYN_GFX900-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; DYN_GFX900-NEXT: s_cbranch_execz .LBB15_4
+; DYN_GFX900-NEXT: ; %bb.1: ; %bb.1
+; DYN_GFX900-NEXT: v_lshl_add_u32 v1, v1, 2, 15
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 2
+; DYN_GFX900-NEXT: v_and_b32_e32 v1, -16, v1
+; DYN_GFX900-NEXT: s_mov_b64 s[6:7], exec
+; DYN_GFX900-NEXT: .LBB15_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s9, s[6:7]
+; DYN_GFX900-NEXT: v_readlane_b32 s10, v1, s9
+; DYN_GFX900-NEXT: s_bitset0_b64 s[6:7], s9
+; DYN_GFX900-NEXT: s_max_u32 s8, s8, s10
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[6:7], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB15_2
+; DYN_GFX900-NEXT: ; %bb.3:
+; DYN_GFX900-NEXT: s_add_i32 s6, s32, 0xfff
+; DYN_GFX900-NEXT: s_and_b32 s6, s6, 0xfffff000
+; DYN_GFX900-NEXT: v_mov_b32_e32 v1, s6
+; DYN_GFX900-NEXT: v_lshl_add_u32 v2, s8, 6, v1
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v2
+; DYN_GFX900-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: ; implicit-def: $vgpr31
+; DYN_GFX900-NEXT: .LBB15_4: ; %Flow
+; DYN_GFX900-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; DYN_GFX900-NEXT: s_cbranch_execz .LBB15_8
+; DYN_GFX900-NEXT: ; %bb.5: ; %bb.0
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[6:7], exec
+; DYN_GFX900-NEXT: s_mov_b32 s8, 0
+; DYN_GFX900-NEXT: .LBB15_6: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s9, s[6:7]
+; DYN_GFX900-NEXT: v_readlane_b32 s10, v0, s9
+; DYN_GFX900-NEXT: s_bitset0_b64 s[6:7], s9
+; DYN_GFX900-NEXT: s_max_u32 s8, s8, s10
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[6:7], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB15_6
+; DYN_GFX900-NEXT: ; %bb.7:
+; DYN_GFX900-NEXT: s_mov_b32 s6, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s6
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s8, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 1
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s6
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: .LBB15_8: ; %bb.2
+; DYN_GFX900-NEXT: s_or_b64 exec, exec, s[4:5]
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xe000
+; DYN_GFX900-NEXT: s_mov_b32 s33, s11
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_control_flow:
+; DYN_GFX1100: ; %bb.0: ; %entry
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: s_mov_b32 s5, s33
+; DYN_GFX1100-NEXT: s_add_i32 s33, s32, 63
+; DYN_GFX1100-NEXT: s_mov_b32 s1, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s0, exec_lo
+; DYN_GFX1100-NEXT: s_and_not1_b32 s33, s33, 63
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0x80
+; DYN_GFX1100-NEXT: v_cmpx_ne_u32_e32 0, v0
+; DYN_GFX1100-NEXT: s_xor_b32 s0, exec_lo, s0
+; DYN_GFX1100-NEXT: s_cbranch_execz .LBB15_4
+; DYN_GFX1100-NEXT: ; %bb.1: ; %bb.1
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v1, v1, 2, 15
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v0, 2
+; DYN_GFX1100-NEXT: s_mov_b32 s2, exec_lo
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v1, -16, v1
+; DYN_GFX1100-NEXT: .LBB15_2: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s3, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s4, v1, s3
+; DYN_GFX1100-NEXT: s_bitset0_b32 s2, s3
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s1, s1, s4
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s2, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB15_2
+; DYN_GFX1100-NEXT: ; %bb.3:
+; DYN_GFX1100-NEXT: s_add_i32 s2, s32, 0x7ff
+; DYN_GFX1100-NEXT: ; implicit-def: $vgpr31
+; DYN_GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: s_and_b32 s2, s2, 0xfffff800
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v1, s1, 5, s2
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v0, s2 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v1
+; DYN_GFX1100-NEXT: .LBB15_4: ; %Flow
+; DYN_GFX1100-NEXT: s_and_not1_saveexec_b32 s0, s0
+; DYN_GFX1100-NEXT: s_cbranch_execz .LBB15_8
+; DYN_GFX1100-NEXT: ; %bb.5: ; %bb.0
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3ff, v31
+; DYN_GFX1100-NEXT: s_mov_b32 s2, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s1, 0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
+; DYN_GFX1100-NEXT: .LBB15_6: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s3, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s4, v0, s3
+; DYN_GFX1100-NEXT: s_bitset0_b32 s2, s3
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s1, s1, s4
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s2, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB15_6
+; DYN_GFX1100-NEXT: ; %bb.7:
+; DYN_GFX1100-NEXT: s_mov_b32 s2, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 1
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s1, 5, s2
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s2 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: .LBB15_8: ; %bb.2
+; DYN_GFX1100-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_addk_i32 s32, 0xff80
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s5
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
entry:
%cond = icmp eq i32 %n, 0
br i1 %cond, label %bb.0, label %bb.1
@@ -190,3 +1335,136 @@ bb.1:
bb.2:
ret void
}
+
+define void @test_dynamic_stackalloc_device_divergent_non_standard_size_i23(i23 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_divergent_non_standard_size_i23:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_mov_b32 s9, s33
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, 0x3fffff0, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s6, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, s32
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB16_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s8, v0, s7
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN_GFX900-NEXT: s_max_u32 s6, s6, s8
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB16_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s4, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s6, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x29a
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s4
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xfc00
+; DYN_GFX900-NEXT: s_mov_b32 s33, s9
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_divergent_non_standard_size_i23:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s33
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s32
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, 16
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, 0x3fffff0, v0
+; DYN_GFX1100-NEXT: .LBB16_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB16_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x29a
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s4
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, -16
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
+ %alloca = alloca i32, i23 %n, align 2, addrspace(5)
+ store volatile i32 666, ptr addrspace(5) %alloca
+ ret void
+}
+
+define void @test_dynamic_stackalloc_device_divergent_non_standard_size_i67(i67 %n) {
+; DYN_GFX900-LABEL: test_dynamic_stackalloc_device_divergent_non_standard_size_i67:
+; DYN_GFX900: ; %bb.0:
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX900-NEXT: s_mov_b32 s9, s33
+; DYN_GFX900-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX900-NEXT: s_mov_b64 s[4:5], exec
+; DYN_GFX900-NEXT: s_mov_b32 s6, 0
+; DYN_GFX900-NEXT: s_mov_b32 s33, s32
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0x400
+; DYN_GFX900-NEXT: .LBB17_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX900-NEXT: s_ff1_i32_b64 s7, s[4:5]
+; DYN_GFX900-NEXT: v_readlane_b32 s8, v0, s7
+; DYN_GFX900-NEXT: s_bitset0_b64 s[4:5], s7
+; DYN_GFX900-NEXT: s_max_u32 s6, s6, s8
+; DYN_GFX900-NEXT: s_cmp_lg_u64 s[4:5], 0
+; DYN_GFX900-NEXT: s_cbranch_scc1 .LBB17_1
+; DYN_GFX900-NEXT: ; %bb.2:
+; DYN_GFX900-NEXT: s_mov_b32 s4, s32
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, s4
+; DYN_GFX900-NEXT: v_lshl_add_u32 v0, s6, 6, v0
+; DYN_GFX900-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX900-NEXT: v_mov_b32_e32 v0, 0x29a
+; DYN_GFX900-NEXT: buffer_store_dword v0, off, s[0:3], s4
+; DYN_GFX900-NEXT: s_waitcnt vmcnt(0)
+; DYN_GFX900-NEXT: s_addk_i32 s32, 0xfc00
+; DYN_GFX900-NEXT: s_mov_b32 s33, s9
+; DYN_GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; DYN_GFX1100-LABEL: test_dynamic_stackalloc_device_divergent_non_standard_size_i67:
+; DYN_GFX1100: ; %bb.0:
+; DYN_GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, v0, 2, 15
+; DYN_GFX1100-NEXT: s_mov_b32 s4, s33
+; DYN_GFX1100-NEXT: s_mov_b32 s1, exec_lo
+; DYN_GFX1100-NEXT: s_mov_b32 s0, 0
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s32
+; DYN_GFX1100-NEXT: v_and_b32_e32 v0, -16, v0
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, 16
+; DYN_GFX1100-NEXT: .LBB17_1: ; =>This Inner Loop Header: Depth=1
+; DYN_GFX1100-NEXT: s_ctz_i32_b32 s2, s1
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; DYN_GFX1100-NEXT: v_readlane_b32 s3, v0, s2
+; DYN_GFX1100-NEXT: s_bitset0_b32 s1, s2
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_max_u32 s0, s0, s3
+; DYN_GFX1100-NEXT: s_cmp_lg_u32 s1, 0
+; DYN_GFX1100-NEXT: s_cbranch_scc1 .LBB17_1
+; DYN_GFX1100-NEXT: ; %bb.2:
+; DYN_GFX1100-NEXT: s_mov_b32 s1, s32
+; DYN_GFX1100-NEXT: v_mov_b32_e32 v1, 0x29a
+; DYN_GFX1100-NEXT: v_lshl_add_u32 v0, s0, 5, s1
+; DYN_GFX1100-NEXT: s_mov_b32 s33, s4
+; DYN_GFX1100-NEXT: scratch_store_b32 off, v1, s1 dlc
+; DYN_GFX1100-NEXT: s_waitcnt_vscnt null, 0x0
+; DYN_GFX1100-NEXT: v_readfirstlane_b32 s32, v0
+; DYN_GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; DYN_GFX1100-NEXT: s_add_i32 s32, s32, -16
+; DYN_GFX1100-NEXT: s_setpc_b64 s[30:31]
+ %alloca = alloca i32, i67 %n, align 2, addrspace(5)
+ store volatile i32 666, ptr addrspace(5) %alloca
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc_isel.ll b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc_isel.ll
new file mode 100644
index 00000000000000..d7cbb6dba082e9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc_isel.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs=0 -stop-after=amdgpu-isel < %s | FileCheck -check-prefix=DYN %s
+
+define amdgpu_kernel void @test_dynamic_stackalloc_kernel_uniform(i32 %n) {
+ ; DYN-LABEL: name: test_dynamic_stackalloc_kernel_uniform
+ ; DYN: bb.0 (%ir-block.0):
+ ; DYN-NEXT: liveins: $sgpr8_sgpr9
+ ; DYN-NEXT: {{ $}}
+ ; DYN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr8_sgpr9
+ ; DYN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s32) from %ir.n.kernarg.offset1, align 16, addrspace 4)
+ ; DYN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2
+ ; DYN-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[S_LOAD_DWORD_IMM]], killed [[S_MOV_B32_]], implicit-def dead $scc
+ ; DYN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+ ; DYN-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = nuw S_ADD_I32 killed [[S_LSHL_B32_]], killed [[S_MOV_B32_1]], implicit-def dead $scc
+ ; DYN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -16
+ ; DYN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 killed [[S_ADD_I32_]], killed [[S_MOV_B32_2]], implicit-def dead $scc
+ ; DYN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; DYN-NEXT: [[WAVE_REDUCE_UMAX_PSEUDO_U32_:%[0-9]+]]:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 killed [[S_AND_B32_]], killed [[S_MOV_B32_3]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+ ; DYN-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[WAVE_REDUCE_UMAX_PSEUDO_U32_]], killed [[S_MOV_B32_4]], implicit-def dead $scc
+ ; DYN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sp_reg
+ ; DYN-NEXT: [[S_ADD_I32_1:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY1]], killed [[S_LSHL_B32_1]], implicit-def dead $scc
+ ; DYN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[S_ADD_I32_1]]
+ ; DYN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 killed [[COPY2]], implicit $exec
+ ; DYN-NEXT: $sp_reg = COPY [[V_READFIRSTLANE_B32_]]
+ ; DYN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+ ; DYN-NEXT: BUFFER_STORE_DWORD_OFFSET killed [[V_MOV_B32_e32_]], $private_rsrc_reg, [[COPY1]], 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.alloca, addrspace 5)
+ ; DYN-NEXT: S_ENDPGM 0
+ %alloca = alloca i32, i32 %n, addrspace(5)
+ store volatile i32 123, ptr addrspace(5) %alloca
+ ret void
+}
+
+define amdgpu_kernel void @test_dynamic_stackalloc_kernel_divergent() {
+ ; DYN-LABEL: name: test_dynamic_stackalloc_kernel_divergent
+ ; DYN: bb.0 (%ir-block.0):
+ ; DYN-NEXT: liveins: $vgpr0
+ ; DYN-NEXT: {{ $}}
+ ; DYN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+ ; DYN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+ ; DYN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 2
+ ; DYN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = nuw V_LSHL_ADD_U32_e64 [[COPY]](s32), killed [[S_MOV_B32_1]], [[COPY1]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 8176
+ ; DYN-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 killed [[V_LSHL_ADD_U32_e64_]], killed [[S_MOV_B32_2]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; DYN-NEXT: [[WAVE_REDUCE_UMAX_PSEUDO_U32_:%[0-9]+]]:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 killed [[V_AND_B32_e64_]], killed [[S_MOV_B32_3]], implicit $exec
+ ; DYN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sp_reg
+ ; DYN-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+ ; DYN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_4]]
+ ; DYN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_LSHL_ADD_U32_e64 killed [[WAVE_REDUCE_UMAX_PSEUDO_U32_]], [[COPY3]], [[COPY4]], implicit $exec
+ ; DYN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 killed [[V_LSHL_ADD_U32_e64_1]], implicit $exec
+ ; DYN-NEXT: $sp_reg = COPY [[V_READFIRSTLANE_B32_]]
+ ; DYN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+ ; DYN-NEXT: BUFFER_STORE_DWORD_OFFSET killed [[V_MOV_B32_e32_]], $private_rsrc_reg, [[COPY2]], 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.alloca, addrspace 5)
+ ; DYN-NEXT: S_ENDPGM 0
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %alloca = alloca i32, i32 %idx, addrspace(5)
+ store volatile i32 123, ptr addrspace(5) %alloca
+ ret void
+}
+
+define amdgpu_kernel void @test_dynamic_stackalloc_kernel_divergent_over_aligned() {
+ ; DYN-LABEL: name: test_dynamic_stackalloc_kernel_divergent_over_aligned
+ ; DYN: bb.0 (%ir-block.0):
+ ; DYN-NEXT: liveins: $vgpr0
+ ; DYN-NEXT: {{ $}}
+ ; DYN-NEXT: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+ ; DYN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+ ; DYN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 2
+ ; DYN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = nuw V_LSHL_ADD_U32_e64 [[COPY]](s32), killed [[S_MOV_B32_1]], [[COPY1]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 8176
+ ; DYN-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 killed [[V_LSHL_ADD_U32_e64_]], killed [[S_MOV_B32_2]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; DYN-NEXT: [[WAVE_REDUCE_UMAX_PSEUDO_U32_:%[0-9]+]]:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 killed [[V_AND_B32_e64_]], killed [[S_MOV_B32_3]], implicit $exec
+ ; DYN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sp_reg
+ ; DYN-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 8191
+ ; DYN-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY2]], killed [[S_MOV_B32_4]], implicit-def dead $scc
+ ; DYN-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 -8192
+ ; DYN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 killed [[S_ADD_I32_]], killed [[S_MOV_B32_5]], implicit-def dead $scc
+ ; DYN-NEXT: [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+ ; DYN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_6]]
+ ; DYN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[S_AND_B32_]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_LSHL_ADD_U32_e64 killed [[WAVE_REDUCE_UMAX_PSEUDO_U32_]], [[COPY3]], [[COPY4]], implicit $exec
+ ; DYN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 killed [[V_LSHL_ADD_U32_e64_1]], implicit $exec
+ ; DYN-NEXT: $sp_reg = COPY [[V_READFIRSTLANE_B32_]]
+ ; DYN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+ ; DYN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_AND_B32_]]
+ ; DYN-NEXT: BUFFER_STORE_DWORD_OFFEN killed [[V_MOV_B32_e32_]], [[COPY5]], $private_rsrc_reg, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.alloca, addrspace 5)
+ ; DYN-NEXT: S_ENDPGM 0
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %alloca = alloca i32, i32 %idx, align 128, addrspace(5)
+ store volatile i32 123, ptr addrspace(5) %alloca
+ ret void
+}
+
+define void @test_dynamic_stackalloc_device_over_aligned(i32 %n) {
+ ; DYN-LABEL: name: test_dynamic_stackalloc_device_over_aligned
+ ; DYN: bb.0 (%ir-block.0):
+ ; DYN-NEXT: liveins: $vgpr0
+ ; DYN-NEXT: {{ $}}
+ ; DYN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; DYN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+ ; DYN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 2
+ ; DYN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = nuw V_LSHL_ADD_U32_e64 [[COPY]], killed [[S_MOV_B32_1]], [[COPY1]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -16
+ ; DYN-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 killed [[V_LSHL_ADD_U32_e64_]], killed [[S_MOV_B32_2]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; DYN-NEXT: [[WAVE_REDUCE_UMAX_PSEUDO_U32_:%[0-9]+]]:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 killed [[V_AND_B32_e64_]], killed [[S_MOV_B32_3]], implicit $exec
+ ; DYN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr32
+ ; DYN-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 65535
+ ; DYN-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY2]], killed [[S_MOV_B32_4]], implicit-def dead $scc
+ ; DYN-NEXT: [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 -65536
+ ; DYN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 killed [[S_ADD_I32_]], killed [[S_MOV_B32_5]], implicit-def dead $scc
+ ; DYN-NEXT: [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+ ; DYN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_6]]
+ ; DYN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[S_AND_B32_]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_LSHL_ADD_U32_e64 killed [[WAVE_REDUCE_UMAX_PSEUDO_U32_]], [[COPY3]], [[COPY4]], implicit $exec
+ ; DYN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 killed [[V_LSHL_ADD_U32_e64_1]], implicit $exec
+ ; DYN-NEXT: $sgpr32 = COPY [[V_READFIRSTLANE_B32_]]
+ ; DYN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+ ; DYN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_AND_B32_]]
+ ; DYN-NEXT: BUFFER_STORE_DWORD_OFFEN killed [[V_MOV_B32_e32_]], [[COPY5]], $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.alloca, addrspace 5)
+ ; DYN-NEXT: SI_RETURN
+ %alloca = alloca i32, i32 %n, align 1024, addrspace(5)
+ store volatile i32 123, ptr addrspace(5) %alloca
+ ret void
+}
+
+define void @test_dynamic_stackalloc_device_under_aligned(i32 %n) {
+ ; DYN-LABEL: name: test_dynamic_stackalloc_device_under_aligned
+ ; DYN: bb.0 (%ir-block.0):
+ ; DYN-NEXT: liveins: $vgpr0
+ ; DYN-NEXT: {{ $}}
+ ; DYN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; DYN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+ ; DYN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 2
+ ; DYN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = nuw V_LSHL_ADD_U32_e64 [[COPY]], killed [[S_MOV_B32_1]], [[COPY1]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -16
+ ; DYN-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 killed [[V_LSHL_ADD_U32_e64_]], killed [[S_MOV_B32_2]], implicit $exec
+ ; DYN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; DYN-NEXT: [[WAVE_REDUCE_UMAX_PSEUDO_U32_:%[0-9]+]]:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 killed [[V_AND_B32_e64_]], killed [[S_MOV_B32_3]], implicit $exec
+ ; DYN-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr32
+ ; DYN-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+ ; DYN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_4]]
+ ; DYN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]]
+ ; DYN-NEXT: [[V_LSHL_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_LSHL_ADD_U32_e64 killed [[WAVE_REDUCE_UMAX_PSEUDO_U32_]], [[COPY3]], [[COPY4]], implicit $exec
+ ; DYN-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 killed [[V_LSHL_ADD_U32_e64_1]], implicit $exec
+ ; DYN-NEXT: $sgpr32 = COPY [[V_READFIRSTLANE_B32_]]
+ ; DYN-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc
+ ; DYN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+ ; DYN-NEXT: BUFFER_STORE_DWORD_OFFSET killed [[V_MOV_B32_e32_]], $sgpr0_sgpr1_sgpr2_sgpr3, [[COPY2]], 0, 0, 0, implicit $exec :: (volatile store (s32) into %ir.alloca, addrspace 5)
+ ; DYN-NEXT: SI_RETURN
+ %alloca = alloca i32, i32 %n, align 4, addrspace(5)
+ store volatile i32 123, ptr addrspace(5) %alloca
+ ret void
+}
More information about the llvm-commits
mailing list