[llvm] [NFC][AMDGPU] Make allocatePreloadKernArgSGPRs usable from both SDAG and GISel (PR #139753)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Tue May 13 15:06:47 PDT 2025
================
@@ -2538,84 +2538,115 @@ void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
// these from the dispatch pointer.
}
+static bool allocPreloadKernArg(uint64_t &LastExplicitArgOffset,
+ uint64_t ArgOffset, unsigned ArgSize,
+ unsigned Idx, MachineFunction &MF,
+ const SIRegisterInfo &TRI,
+ SIMachineFunctionInfo &Info, CCState &CCInfo) {
+ GCNUserSGPRUsageInfo &SGPRInfo = Info.getUserSGPRInfo();
+ const Align KernelArgBaseAlign = Align(16);
+ Align Alignment = commonAlignment(KernelArgBaseAlign, ArgOffset);
+  constexpr unsigned SGPRSize = 4;
+ unsigned NumAllocSGPRs = alignTo(ArgSize, SGPRSize) / SGPRSize;
+
+ // Arg is preloaded into the previous SGPR.
+ if (ArgSize < SGPRSize && Alignment < SGPRSize) {
+ assert(Idx >= 1 && "No previous SGPR");
+ AMDGPUFunctionArgInfo &ArgInfo = Info.getArgInfo();
+ auto &ArgDesc = ArgInfo.PreloadKernArgs[Idx];
+ auto &PrevArgDesc = ArgInfo.PreloadKernArgs[Idx - 1];
+ ArgDesc.Regs.push_back(PrevArgDesc.Regs[0]);
+ return true;
+ }
+
+ unsigned Padding = ArgOffset - LastExplicitArgOffset;
+ unsigned PaddingSGPRs = alignTo(Padding, SGPRSize) / SGPRSize;
+ // Check for free user SGPRs for preloading.
+ if (PaddingSGPRs + NumAllocSGPRs > SGPRInfo.getNumFreeUserSGPRs())
+ return false;
+
+ // Preload this argument.
+ const TargetRegisterClass *RC =
+ TRI.getSGPRClassForBitWidth(NumAllocSGPRs * 32);
+ SmallVectorImpl<MCRegister> *PreloadRegs =
+ Info.addPreloadedKernArg(TRI, RC, NumAllocSGPRs, Idx, PaddingSGPRs);
+
+ if (PreloadRegs->size() > 1)
+ RC = &AMDGPU::SGPR_32RegClass;
+
+ for (MCRegister Reg : *PreloadRegs) {
+ assert(Reg);
+ MF.addLiveIn(Reg, RC);
+ CCInfo.AllocateReg(Reg);
+ }
+
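+  // Note where this argument's SGPRs end; the next argument's padding is
+  // measured from here.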
+ LastExplicitArgOffset = NumAllocSGPRs * SGPRSize + ArgOffset;
+ return true;
+}
+
// Allocate pre-loaded kernel arguments. Arguments to be preloaded must be
// sequential starting from the first argument.
void SITargetLowering::allocatePreloadKernArgSGPRs(
- CCState &CCInfo, SmallVectorImpl<CCValAssign> &ArgLocs,
- const SmallVectorImpl<ISD::InputArg> &Ins, MachineFunction &MF,
+ CCState &CCInfo, SmallVectorImpl<CCValAssign> &ArgLocs, MachineFunction &MF,
const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const {
Function &F = MF.getFunction();
- unsigned LastExplicitArgOffset = Subtarget->getExplicitKernelArgOffset();
- GCNUserSGPRUsageInfo &SGPRInfo = Info.getUserSGPRInfo();
- bool InPreloadSequence = true;
- unsigned InIdx = 0;
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset();
+ uint64_t ExplicitArgOffset = BaseOffset;
+ uint64_t LastExplicitArgOffset = ExplicitArgOffset;
+ unsigned LocIdx = 0;
bool AlignedForImplictArgs = false;
unsigned ImplicitArgOffset = 0;
+
for (auto &Arg : F.args()) {
- if (!InPreloadSequence || !Arg.hasInRegAttr())
+ if (!Arg.hasInRegAttr())
break;
- unsigned ArgIdx = Arg.getArgNo();
- // Don't preload non-original args or parts not in the current preload
- // sequence.
- if (InIdx < Ins.size() &&
- (!Ins[InIdx].isOrigArg() || Ins[InIdx].getOrigArgIndex() != ArgIdx))
+ const bool IsByRef = Arg.hasByRefAttr();
+ Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
+ unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
+
+ if (AllocSize == 0)
break;
- for (; InIdx < Ins.size() && Ins[InIdx].isOrigArg() &&
- Ins[InIdx].getOrigArgIndex() == ArgIdx;
- InIdx++) {
- assert(ArgLocs[ArgIdx].isMemLoc());
- auto &ArgLoc = ArgLocs[InIdx];
- const Align KernelArgBaseAlign = Align(16);
- unsigned ArgOffset = ArgLoc.getLocMemOffset();
- Align Alignment = commonAlignment(KernelArgBaseAlign, ArgOffset);
- unsigned NumAllocSGPRs =
- alignTo(ArgLoc.getLocVT().getFixedSizeInBits(), 32) / 32;
-
- // Fix alignment for hidden arguments.
- if (Arg.hasAttribute("amdgpu-hidden-argument")) {
- if (!AlignedForImplictArgs) {
- ImplicitArgOffset =
- alignTo(LastExplicitArgOffset,
- Subtarget->getAlignmentForImplicitArgPtr()) -
- LastExplicitArgOffset;
- AlignedForImplictArgs = true;
- }
- ArgOffset += ImplicitArgOffset;
- }
+ MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
+ Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);
- // Arg is preloaded into the previous SGPR.
- if (ArgLoc.getLocVT().getStoreSize() < 4 && Alignment < 4) {
- assert(InIdx >= 1 && "No previous SGPR");
- Info.getArgInfo().PreloadKernArgs[InIdx].Regs.push_back(
- Info.getArgInfo().PreloadKernArgs[InIdx - 1].Regs[0]);
- continue;
- }
+ // Fix alignment for hidden arguments.
+ if (Arg.hasAttribute("amdgpu-hidden-argument") && !AlignedForImplictArgs) {
+ ImplicitArgOffset = alignTo(LastExplicitArgOffset,
+ Subtarget->getAlignmentForImplicitArgPtr()) -
+ LastExplicitArgOffset;
+ AlignedForImplictArgs = true;
+ }
- unsigned Padding = ArgOffset - LastExplicitArgOffset;
- unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
- // Check for free user SGPRs for preloading.
- if (PaddingSGPRs + NumAllocSGPRs > SGPRInfo.getNumFreeUserSGPRs()) {
- InPreloadSequence = false;
- break;
- }
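+    // Place the argument at its ABI alignment relative to the explicit
+    // kernarg base and advance the running offset past its storage.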
+ uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
+ ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;
- // Preload this argument.
- const TargetRegisterClass *RC =
- TRI.getSGPRClassForBitWidth(NumAllocSGPRs * 32);
- SmallVectorImpl<MCRegister> *PreloadRegs =
- Info.addPreloadedKernArg(TRI, RC, NumAllocSGPRs, InIdx, PaddingSGPRs);
-
- if (PreloadRegs->size() > 1)
- RC = &AMDGPU::SGPR_32RegClass;
- for (auto &Reg : *PreloadRegs) {
- assert(Reg);
- MF.addLiveIn(Reg, RC);
- CCInfo.AllocateReg(Reg);
- }
+ if (ArgLocs.empty()) {
+      // GlobalISel: no CCValAssign list was computed, so derive the offset
+      // directly from the IR signature.
+ if (Arg.hasAttribute("amdgpu-hidden-argument"))
----------------
arsenm wrote:
IIRC analyzeFormalArgumentsCompute is for kernels, and is the hacky interpretation of the IR signature as in-memory values. GlobalISel bypasses that, since this code is mostly trying to reverse-engineer what the original IR type was and getting it to match.
For arguments passed in registers, the situation should be simpler. Ideally the preloaded arguments would behave like normal arguments passed in registers for non-kernels.
https://github.com/llvm/llvm-project/pull/139753
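To make the offset bookkeeping concrete, here is a minimal standalone sketch (illustrative only, not LLVM code; the example signature and values are assumptions) of the computation the GlobalISel path has to redo from the DataLayout because no CCValAssign list is available: each argument is placed at its ABI alignment relative to the explicit kernarg base, matching the ArgOffset/ExplicitArgOffset updates in the patch.

#include <cstdint>
#include <cstdio>

// Round Value up to the next multiple of Alignment (a power of two here).
static uint64_t alignUp(uint64_t Value, uint64_t Alignment) {
  return (Value + Alignment - 1) & ~(Alignment - 1);
}

int main() {
  // Hypothetical kernel signature (i32, i16, <4 x i32>); sizes and ABI
  // alignments are in bytes.
  struct Arg { uint64_t Size, Align; } Args[] = {{4, 4}, {2, 2}, {16, 16}};
  const uint64_t BaseOffset = 0; // stand-in for getExplicitKernelArgOffset()
  uint64_t ExplicitArgOffset = BaseOffset;
  for (const Arg &A : Args) {
    // The same two updates as in allocatePreloadKernArgSGPRs above.
    uint64_t ArgOffset = alignUp(ExplicitArgOffset, A.Align) + BaseOffset;
    ExplicitArgOffset = alignUp(ExplicitArgOffset, A.Align) + A.Size;
    std::printf("arg at byte %llu, next free byte %llu\n",
                (unsigned long long)ArgOffset,
                (unsigned long long)ExplicitArgOffset);
  }
  return 0;
}

The gap between the i16 ending at byte 6 and the <4 x i32> starting at byte 16 is what the patch charges as PaddingSGPRs before checking the free user-SGPR budget.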