[llvm] [AMDGPU] Support preloading hidden kernel arguments (PR #98861)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 14 23:43:37 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Austin Kerbow (kerbowa)
Changes:
Adds the hidden kernel arguments to the kernel's function signature and marks them inreg if they should be preloaded into user SGPRs. The normal kernarg preloading logic then takes over, with some additional checks for the correct implicitarg_ptr alignment.
Special care is taken so that metadata for the hidden arguments is not added twice when generating the code object.
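
To make the rewrite concrete, here is a hand-written sketch of the IR transformation (not output from the pass; the kernel `@k` and the renamed clone `@k.cloned` are hypothetical, and the real pass reuses the original function name via `takeName`). The `_hidden_*` names and types follow the `HiddenArgs` table in the diff below:

```llvm
; Before: the hidden argument is fetched through the implicit kernarg pointer.
define amdgpu_kernel void @k(ptr addrspace(1) %out) {
  %imp = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %bcx = load i32, ptr addrspace(4) %imp     ; _hidden_block_count_x, offset 0
  store i32 %bcx, ptr addrspace(1) %out
  ret void
}

; After (sketch): the kernel is cloned with all hidden arguments appended to
; the signature. Hidden arguments up to the last preloaded one are marked
; inreg, and each preloaded load is replaced by a direct use of the new
; argument; trailing unused hidden arguments stay unannotated.
define amdgpu_kernel void @k.cloned(ptr addrspace(1) inreg %out,
    i32 inreg %_hidden_block_count_x, i32 %_hidden_block_count_y,
    i32 %_hidden_block_count_z, i16 %_hidden_group_size_x,
    i16 %_hidden_group_size_y, i16 %_hidden_group_size_z,
    i16 %_hidden_remainder_x, i16 %_hidden_remainder_y,
    i16 %_hidden_remainder_z) {
  store i32 %_hidden_block_count_x, ptr addrspace(1) %out
  ret void
}

declare ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
```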
---
Patch is 64.19 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/98861.diff
8 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp (+3)
- (modified) llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp (+178-2)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp (+2)
- (modified) llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+14)
- (modified) llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (+9)
- (modified) llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h (+3)
- (added) llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll (+698)
- (modified) llvm/test/CodeGen/AMDGPU/preload-kernargs.ll (+37-39)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index 1aaf514ae8f62..ef049505dc628 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -283,6 +283,9 @@ void MetadataStreamerMsgPackV4::emitKernelArg(const Argument &Arg,
else if (Arg.hasName())
Name = Arg.getName();
+ if (Name.starts_with("_hidden"))
+ return;
+
StringRef TypeName;
Node = Func->getMetadata("kernel_arg_type");
if (Node && ArgNo < Node->getNumOperands())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index 83cce6021693a..e9ba41ef8681f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -13,6 +13,7 @@
#include "AMDGPU.h"
#include "GCNSubtarget.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -31,9 +32,88 @@ class PreloadKernelArgInfo {
const GCNSubtarget &ST;
unsigned NumFreeUserSGPRs;
-public:
- SmallVector<llvm::Metadata *, 8> KernelArgMetadata;
+ enum HiddenArg : unsigned {
+ HIDDEN_BLOCK_COUNT_X,
+ HIDDEN_BLOCK_COUNT_Y,
+ HIDDEN_BLOCK_COUNT_Z,
+ HIDDEN_GROUP_SIZE_X,
+ HIDDEN_GROUP_SIZE_Y,
+ HIDDEN_GROUP_SIZE_Z,
+ HIDDEN_REMAINDER_X,
+ HIDDEN_REMAINDER_Y,
+ HIDDEN_REMAINDER_Z,
+ END_HIDDEN_ARGS
+ };
+
+ struct HiddenArgInfo {
+ unsigned Offset;
+ unsigned Size;
+ const char *Name;
+ };
+
+ static constexpr HiddenArgInfo HiddenArgs[END_HIDDEN_ARGS] = {
+ {0, 4, "_hidden_block_count_x"}, {4, 4, "_hidden_block_count_y"},
+ {8, 4, "_hidden_block_count_z"}, {12, 2, "_hidden_group_size_x"},
+ {14, 2, "_hidden_group_size_y"}, {16, 2, "_hidden_group_size_z"},
+ {18, 2, "_hidden_remainder_x"}, {20, 2, "_hidden_remainder_y"},
+ {22, 2, "_hidden_remainder_z"}};
+
+ static HiddenArg getHiddenArgIndexFromOffset(unsigned Offset) {
+ for (unsigned I = 0; I < END_HIDDEN_ARGS; ++I)
+ if (HiddenArgs[I].Offset == Offset)
+ return static_cast<HiddenArg>(I);
+
+ llvm_unreachable("Unexpected hidden argument offset.");
+ }
+
+ static Type *getHiddenArgType(LLVMContext &Ctx, HiddenArg HA) {
+ if (HA < END_HIDDEN_ARGS)
+ return Type::getIntNTy(Ctx, HiddenArgs[HA].Size * 8);
+
+ llvm_unreachable("Unexpected hidden argument.");
+ }
+
+ static const char *getHiddenArgName(HiddenArg HA) {
+ if (HA < END_HIDDEN_ARGS) {
+ return HiddenArgs[HA].Name;
+ }
+ llvm_unreachable("Unexpected hidden argument.");
+ }
+ Function *cloneFunctionWithPreloadImplicitArgs() {
+ FunctionType *FT = F.getFunctionType();
+ std::vector<Type *> FTypes(FT->param_begin(), FT->param_end());
+ for (unsigned I = 0; I < END_HIDDEN_ARGS; ++I)
+ FTypes.push_back(getHiddenArgType(F.getContext(), HiddenArg(I)));
+
+ FunctionType *NFT =
+ FunctionType::get(FT->getReturnType(), FTypes, FT->isVarArg());
+ Function *NF =
+ Function::Create(NFT, F.getLinkage(), F.getAddressSpace(), F.getName());
+
+ NF->copyAttributesFrom(&F);
+ NF->copyMetadata(&F, 0);
+ NF->setIsNewDbgInfoFormat(F.IsNewDbgInfoFormat);
+
+ F.getParent()->getFunctionList().insert(F.getIterator(), NF);
+ NF->takeName(&F);
+ assert(F.use_empty());
+ NF->splice(NF->begin(), &F);
+
+ Function::arg_iterator NFArg = NF->arg_begin();
+ for (Argument &Arg : F.args()) {
+ Arg.replaceAllUsesWith(&*NFArg);
+ NFArg->takeName(&Arg);
+ ++NFArg;
+ }
+
+ for (unsigned I = 0; I < END_HIDDEN_ARGS; ++I)
+ NFArg++->setName(getHiddenArgName(HiddenArg(I)));
+
+ return NF;
+ }
+
+public:
PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
setInitialFreeUserSGPRsCount();
}
@@ -64,6 +144,94 @@ class PreloadKernelArgInfo {
NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
return true;
}
+
+ // Try to allocate SGPRs to preload implicit kernel arguments.
+ void tryAllocImplicitArgPreloadSGPRs(uint64_t ImplicitArgsBaseOffset,
+ IRBuilder<> &Builder) {
+ StringRef Name = Intrinsic::getName(Intrinsic::amdgcn_implicitarg_ptr);
+ Function *ImplicitArgPtr = F.getParent()->getFunction(Name);
+ if (!ImplicitArgPtr)
+ return;
+
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ // Pair is the load and the load offset.
+ SmallVector<std::pair<LoadInst *, unsigned>, 4> ImplicitArgLoads;
+ for (auto *U : ImplicitArgPtr->users()) {
+ Instruction *CI = dyn_cast<Instruction>(U);
+ if (!CI || CI->getParent()->getParent() != &F)
+ continue;
+
+ for (auto *U : CI->users()) {
+ int64_t Offset = 0;
+ auto *Load = dyn_cast<LoadInst>(U); // Load from ImplicitArgPtr?
+ if (!Load) {
+ if (GetPointerBaseWithConstantOffset(U, Offset, DL) != CI)
+ continue;
+
+ Load = dyn_cast<LoadInst>(*U->user_begin()); // Load from GEP?
+ }
+
+ if (!Load || !Load->isSimple())
+ continue;
+
+ // FIXME: Expand to handle 64-bit implicit args and large merged loads.
+ unsigned LoadSize = Load->getType()->getScalarSizeInBits();
+ if (LoadSize != 32 && LoadSize != 16)
+ continue;
+
+ ImplicitArgLoads.push_back(std::make_pair(Load, Offset));
+ }
+ }
+
+ if (ImplicitArgLoads.empty())
+ return;
+
+ // Allocate loads in order of offset. We need to be sure that the implicit
+ // argument can actually be preloaded.
+ std::sort(ImplicitArgLoads.begin(), ImplicitArgLoads.end(),
+ [](const std::pair<LoadInst *, unsigned> &A,
+ const std::pair<LoadInst *, unsigned> &B) {
+ return A.second < B.second;
+ });
+
+ uint64_t LastExplicitArgOffset = ImplicitArgsBaseOffset;
+ bool AddedHiddenArgsToSignature = false;
+ Function *NF = nullptr;
+ unsigned LastPreloadIndex = 0;
+ for (const auto &Load : ImplicitArgLoads) {
+ LoadInst *LoadInst = Load.first;
+ Type *LoadType = LoadInst->getType();
+ auto LoadOffset = Load.second;
+ unsigned LoadSize = DL.getTypeStoreSize(LoadType);
+ // If we fail to preload any implicit argument we know we don't have SGPRs
+ // to preload any subsequent ones with larger offsets.
+ if (!tryAllocPreloadSGPRs(LoadSize, LoadOffset + ImplicitArgsBaseOffset,
+ LastExplicitArgOffset))
+ break;
+
+ if (!AddedHiddenArgsToSignature) {
+ NF = cloneFunctionWithPreloadImplicitArgs();
+ AddedHiddenArgsToSignature = true;
+ }
+
+      LastExplicitArgOffset = ImplicitArgsBaseOffset + LoadOffset + LoadSize;
+ unsigned HiddenArgIndex = getHiddenArgIndexFromOffset(LoadOffset);
+ assert(NF);
+ unsigned Index = NF->arg_size() - END_HIDDEN_ARGS + HiddenArgIndex;
+ Argument *Arg = NF->getArg(Index);
+ LoadInst->replaceAllUsesWith(Arg);
+      if (HiddenArgIndex > LastPreloadIndex)
+        LastPreloadIndex = HiddenArgIndex;
+    }
+
+    // If no argument ended up being preloaded, no clone was created and the
+    // original function must be left in place.
+    if (!NF)
+      return;
+
+    // Ensure all hidden arguments up to the final preload are also
+    // preloaded, even if some are unused.
+    for (unsigned I = 0; I <= LastPreloadIndex; ++I)
+      NF->getArg(NF->arg_size() - END_HIDDEN_ARGS + I)
+          ->addAttr(Attribute::InReg);
+
+    F.removeFromParent();
+  }
};
class AMDGPULowerKernelArguments : public FunctionPass {
@@ -281,6 +449,14 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
KernArgSegment->addRetAttr(
Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));
+ if (InPreloadSequence) {
+ uint64_t ImplicitArgsBaseOffset =
+ alignTo(ExplicitArgOffset, ST.getAlignmentForImplicitArgPtr()) +
+ BaseOffset;
+ PreloadInfo.tryAllocImplicitArgPreloadSGPRs(ImplicitArgsBaseOffset,
+ Builder);
+ }
+
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 21fe1bc31a27e..0b4565a8fdc2b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -601,6 +601,8 @@ uint64_t AMDGPUSubtarget::getExplicitKernArgSize(const Function &F,
MaxAlign = Align(1);
for (const Argument &Arg : F.args()) {
+ if (Arg.getName().starts_with("_hidden"))
+ continue;
const bool IsByRef = Arg.hasByRefAttr();
Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
Align Alignment = DL.getValueOrABITypeAlignment(
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a733295d2a511..954cab0702afb 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -2496,6 +2496,7 @@ void SITargetLowering::allocatePreloadKernArgSGPRs(
GCNUserSGPRUsageInfo &SGPRInfo = Info.getUserSGPRInfo();
bool InPreloadSequence = true;
unsigned InIdx = 0;
+  bool AlignedForImplicitArgs = false;
for (auto &Arg : F.args()) {
if (!InPreloadSequence || !Arg.hasInRegAttr())
break;
@@ -2518,6 +2519,19 @@ void SITargetLowering::allocatePreloadKernArgSGPRs(
unsigned NumAllocSGPRs =
alignTo(ArgLoc.getLocVT().getFixedSizeInBits(), 32) / 32;
+    if (!AlignedForImplicitArgs && Arg.getName().starts_with("_hidden")) {
+ unsigned OffsetBefore = LastExplicitArgOffset;
+ LastExplicitArgOffset = alignTo(
+ LastExplicitArgOffset, Subtarget->getAlignmentForImplicitArgPtr());
+ if (OffsetBefore != LastExplicitArgOffset) {
+ unsigned PaddingSGPRs =
+ alignTo(LastExplicitArgOffset - OffsetBefore, 4) / 4;
+ Info.allocateUserSGPRs(PaddingSGPRs);
+ ArgOffset += PaddingSGPRs * 4;
+ }
+      AlignedForImplicitArgs = true;
+ }
+
// Arg is preloaded into the previous SGPR.
if (ArgLoc.getLocVT().getStoreSize() < 4 && Alignment < 4) {
Info.getArgInfo().PreloadKernArgs[InIdx].Regs.push_back(
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index d9db0f7a4f531..35f41a3b87c3a 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -278,6 +278,15 @@ SmallVectorImpl<MCRegister> *SIMachineFunctionInfo::addPreloadedKernArg(
return &ArgInfo.PreloadKernArgs[KernArgIdx].Regs;
}
+bool SIMachineFunctionInfo::allocateUserSGPRs(unsigned Number) {
+ if (Number <= getNumUserSGPRs())
+ return false;
+
+ NumUserSGPRs = Number;
+ return true;
+}
+
void SIMachineFunctionInfo::allocateWWMSpill(MachineFunction &MF, Register VGPR,
uint64_t Size, Align Alignment) {
// Skip if it is an entry function or the register is already added.
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 7af5e7388f841..f2ff919a4bc0e 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -760,6 +760,9 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction,
unsigned AllocSizeDWord, int KernArgIdx,
int PaddingSGPRs);
+  /// Reserve at least \p Number user SGPRs in total; returns true if any
+  /// additional SGPRs needed to be reserved.
+ bool allocateUserSGPRs(unsigned Number);
+
/// Increment user SGPRs used for padding the argument list only.
Register addReservedUserSGPR() {
Register Next = getNextUserSGPR();
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
new file mode 100644
index 0000000000000..456c9e3dc7ee5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
@@ -0,0 +1,698 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-NO-PRELOAD %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX940-PRELOAD %s
+
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-NO-PRELOAD %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx90a -amdgpu-kernarg-preload-count=16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX90a-PRELOAD %s
+
+define amdgpu_kernel void @preload_block_count_x(ptr addrspace(1) %out) {
+; GFX940-NO-PRELOAD-LABEL: preload_block_count_x:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-LABEL: preload_block_count_x:
+; GFX940-PRELOAD: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-NEXT: ; %bb.0:
+; GFX940-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: preload_block_count_x:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x8
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-LABEL: preload_block_count_x:
+; GFX90a-PRELOAD: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-NEXT: v_mov_b32_e32 v1, s8
+; GFX90a-PRELOAD-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-NEXT: s_endpgm
+ %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+ %load = load i32, ptr addrspace(4) %imp_arg_ptr
+ store i32 %load, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @preload_block_count_y(ptr addrspace(1) %out) {
+; GFX940-NO-PRELOAD-LABEL: preload_block_count_y:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0xc
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-LABEL: preload_block_count_y:
+; GFX940-PRELOAD: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-NEXT: ; %bb.0:
+; GFX940-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-NEXT: v_mov_b32_e32 v1, s5
+; GFX940-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: preload_block_count_y:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0xc
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-LABEL: preload_block_count_y:
+; GFX90a-PRELOAD: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-NEXT: v_mov_b32_e32 v1, s9
+; GFX90a-PRELOAD-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-NEXT: s_endpgm
+ %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 4
+ %load = load i32, ptr addrspace(4) %gep
+ store i32 %load, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @preload_block_count_z(ptr addrspace(1) %out) {
+; GFX940-NO-PRELOAD-LABEL: preload_block_count_z:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s4
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-LABEL: preload_block_count_z:
+; GFX940-PRELOAD: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-NEXT: ; %bb.0:
+; GFX940-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-PRELOAD-NEXT: v_mov_b32_e32 v1, s6
+; GFX940-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-NO-PRELOAD-LABEL: preload_block_count_z:
+; GFX90a-NO-PRELOAD: ; %bb.0:
+; GFX90a-NO-PRELOAD-NEXT: s_load_dword s2, s[4:5], 0x10
+; GFX90a-NO-PRELOAD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90a-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s2
+; GFX90a-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX90a-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX90a-PRELOAD-LABEL: preload_block_count_z:
+; GFX90a-PRELOAD: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX90a-PRELOAD-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX90a-PRELOAD-NEXT: ; %bb.0:
+; GFX90a-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX90a-PRELOAD-NEXT: v_mov_b32_e32 v1, s10
+; GFX90a-PRELOAD-NEXT: global_store_dword v0, v1, s[6:7]
+; GFX90a-PRELOAD-NEXT: s_endpgm
+ %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 8
+ %load = load i32, ptr addrspace(4) %gep
+ store i32 %load, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @preload_block_count_x_imparg_align_ptr_i8(ptr addrspace(1) %out, i8 %val) {
+; GFX940-NO-PRELOAD-LABEL: preload_block_count_x_imparg_align_ptr_i8:
+; GFX940-NO-PRELOAD: ; %bb.0:
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s4, s[0:1], 0x8
+; GFX940-NO-PRELOAD-NEXT: s_load_dword s5, s[0:1], 0x10
+; GFX940-NO-PRELOAD-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v0, 0
+; GFX940-NO-PRELOAD-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NO-PRELOAD-NEXT: s_and_b32 s0, s4, 0xff
+; GFX940-NO-PRELOAD-NEXT: s_add_i32 s0, s5, s0
+; GFX940-NO-PRELOAD-NEXT: v_mov_b32_e32 v1, s0
+; GFX940-NO-PRELOAD-NEXT: global_store_dword v0, v1, s[2:3] sc0 sc1
+; GFX940-NO-PRELOAD-NEXT: s_endpgm
+;
+; GFX940-PRELOAD-LABEL: preload_block_count_x_imparg_align_ptr_i8:
+; GFX940-PRELOAD: s_trap 2 ; Kernarg preload header. Trap with incompatible firmware that doesn't support preloading kernel arguments.
+; GFX940-PRELOAD-NEXT: .fill 63, 4, 0xbf800000 ; s_nop 0
+; GFX940-PRELOAD-NEXT: ; %bb.0:
+; GFX940-PRELOAD-NEXT: s_and_b32 s0, s4, 0xff
+; GFX940-PRELOAD-NEX...
[truncated]
``````````
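
For anyone who wants to try this locally, the RUN lines in the new test above suggest a minimal reproducer along these lines (the preload count of 16 is just the value the test uses, not a requirement):

```llvm
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx940 -amdgpu-kernarg-preload-count=16 < %s
define amdgpu_kernel void @read_block_count_x(ptr addrspace(1) %out) {
  %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %load = load i32, ptr addrspace(4) %imp_arg_ptr
  store i32 %load, ptr addrspace(1) %out
  ret void
}

declare ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
```

With preloading enabled, the block count should come straight from a user SGPR (s4 in the gfx940 checks above) rather than from an s_load of the implicit kernarg segment.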
https://github.com/llvm/llvm-project/pull/98861
More information about the llvm-commits mailing list