[llvm] [AMDGPU] Add backward compatibility layer for kernarg preloading (PR #119167)

via llvm-commits llvm-commits at lists.llvm.org
Sun Dec 8 20:59:48 PST 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-amdgpu

Author: Austin Kerbow (kerbowa)

<details>
<summary>Changes</summary>

Add a prologue to the kernel entry to handle cases where code designed for kernarg preloading is executed on hardware equipped with incompatible firmware. If hardware has compatible firmware the 256 bytes at the start of the kernel entry will be skipped. This skipping is done automatically by hardware that supports the feature.

A pass is added which is intended to be run at the very end of the pipeline to avoid any optimizations that would assume the prologue is a real predecessor block to the actual code start. In reality we have two possible entry points for the function. 1. The optimized path that supports kernarg preloading which begins at an offset of 256 bytes. 2. The backwards compatible entry point which starts at offset 0.

---

Patch is 332.75 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/119167.diff


15 Files Affected:

- (modified) llvm/docs/AMDGPUUsage.rst (+1-4) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPU.h (+4) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h (+2) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp (-6) 
- (added) llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.cpp (+229) 
- (added) llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.h (+25) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (+2) 
- (modified) llvm/lib/Target/AMDGPU/CMakeLists.txt (+1) 
- (modified) llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp (-23) 
- (modified) llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h (-14) 
- (modified) llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (+2) 
- (modified) llvm/test/CodeGen/AMDGPU/llc-pipeline.ll (+5) 
- (modified) llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll (+2509-115) 
- (removed) llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll (-23) 
- (modified) llvm/test/CodeGen/AMDGPU/preload-kernargs.ll (+4497-215) 


``````````diff
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 5c6034753eb4af..40b393224f15dd 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -5914,10 +5914,7 @@ additional 256 bytes to the kernel_code_entry_byte_offset. This addition
 facilitates the incorporation of a prologue to the kernel entry to handle cases
 where code designed for kernarg preloading is executed on hardware equipped with
 incompatible firmware. If hardware has compatible firmware the 256 bytes at the
-start of the kernel entry will be skipped. Additionally, the compiler backend
-may insert a trap instruction at the start of the kernel prologue to manage
-situations where kernarg preloading is attempted on hardware with incompatible
-firmware.
+start of the kernel entry will be skipped.
 
 With code object V5 and later, hidden kernel arguments that are normally
 accessed through the Implicit Argument Ptr, may be preloaded into User SGPRs.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index b9769a1baf4d17..94de55923b4a3c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -64,6 +64,7 @@ createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM = nullptr);
 ModulePass *createAMDGPULowerBufferFatPointersPass();
 FunctionPass *createSIModeRegisterPass();
 FunctionPass *createGCNPreRAOptimizationsPass();
+FunctionPass *createAMDGPUPreloadKernargHeaderLegacyPass();
 
 struct AMDGPUSimplifyLibCallsPass : PassInfoMixin<AMDGPUSimplifyLibCallsPass> {
   AMDGPUSimplifyLibCallsPass() {}
@@ -230,6 +231,9 @@ extern char &AMDGPUPerfHintAnalysisLegacyID;
 void initializeGCNRegPressurePrinterPass(PassRegistry &);
 extern char &GCNRegPressurePrinterID;
 
+void initializeAMDGPUPreloadKernargHeaderLegacyPass(PassRegistry &);
+extern char &AMDGPUPreloadKernargHeaderLegacyID;
+
 // Passes common to R600 and SI
 FunctionPass *createAMDGPUPromoteAlloca();
 void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
index 06b2f181c276cd..859625e9b538a3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUARGUMENTUSAGEINFO_H
 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUARGUMENTUSAGEINFO_H
 
+#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/CodeGen/Register.h"
 #include "llvm/Pass.h"
@@ -161,6 +162,7 @@ struct AMDGPUFunctionArgInfo {
 
   // Map the index of preloaded kernel arguments to its descriptor.
   SmallDenseMap<int, KernArgPreloadDescriptor> PreloadKernArgs{};
+  Register FirstKernArgPreloadReg = AMDGPU::NoRegister;
 
   std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
   getPreloadedValue(PreloadedValue Value) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 90c341ac0819cc..737b2f740d6f77 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -207,12 +207,6 @@ void AMDGPUAsmPrinter::emitFunctionBodyStart() {
 
   if (STM.isAmdHsaOS())
     HSAMetadataStream->emitKernel(*MF, CurrentProgramInfo);
-
-  if (MFI.getNumKernargPreloadedSGPRs() > 0) {
-    assert(AMDGPU::hasKernargPreload(STM));
-    getTargetStreamer()->EmitKernargPreloadHeader(*getGlobalSTI(),
-                                                  STM.isAmdHsaOS());
-  }
 }
 
 void AMDGPUAsmPrinter::emitFunctionBodyEnd() {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.cpp
new file mode 100644
index 00000000000000..756665a7e4d7a2
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.cpp
@@ -0,0 +1,229 @@
+//===- AMDGPUPreloadKernargHeader.cpp - Preload Kernarg Header ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This pass handles the creation of the backwards compatibility layer
+/// for kernarg preloading. Code may be compiled with the feature enabled, while
+/// the kernel is executed on hardware without firmware support.
+///
+/// To avoid the need for recompilation, we insert a block at the beginning of
+/// the kernel that is responsible for loading the kernel arguments into SGPRs
+/// using s_load instructions which set up the registers exactly as they would
+/// be by firmware if the code were executed on a system that supported kernarg
+/// preloading.
+///
+/// This essentially allows for two entry points for the kernel. Firmware that
+/// supports the feature will automatically jump past the first 256 bytes of the
+/// program, skipping the backwards compatibility layer and directly beginning
+/// execution on the fast code path.
+///
+/// This pass should be run as late as possible, to avoid any optimization that
+/// may assume that padding is dead code or that the prologue added here is a
+/// true predecessor of the kernel entry block.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUPreloadKernargHeader.h"
+#include "AMDGPU.h"
+#include "GCNSubtarget.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/TargetParser/TargetParser.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-preload-kernarg-header"
+
+namespace {
+
+struct LoadConfig {
+  unsigned Size;
+  const TargetRegisterClass *RegClass;
+  unsigned Opcode;
+  Register LoadReg;
+
+  // Constructor for the static config array
+  LoadConfig(unsigned S, const TargetRegisterClass *RC, unsigned Op)
+      : Size(S), RegClass(RC), Opcode(Op), LoadReg(AMDGPU::NoRegister) {}
+
+  // Constructor for the return value
+  LoadConfig(unsigned S, const TargetRegisterClass *RC, unsigned Op,
+             Register Reg)
+      : Size(S), RegClass(RC), Opcode(Op), LoadReg(Reg) {}
+};
+
+class AMDGPUPreloadKernargHeader {
+public:
+  AMDGPUPreloadKernargHeader(MachineFunction &MF);
+
+  bool run();
+
+private:
+  MachineFunction &MF;
+  const GCNSubtarget &ST;
+  const SIMachineFunctionInfo &MFI;
+  const SIInstrInfo &TII;
+  const TargetRegisterInfo &TRI;
+
+  // Create a new block before the entry point to the kernel. Firmware that
+  // supports preloading kernel arguments will automatically jump past this
+  // block to the alternative kernel entry point.
+  void createBackCompatBlock();
+
+  // Add instructions to load kernel arguments into SGPRs, returns the number of
+  // s_load instructions added.
+  unsigned addBackCompatLoads(MachineBasicBlock *BackCompatMBB,
+                              Register KernargSegmentPtr,
+                              unsigned NumKernargPreloadSGPRs);
+};
+
+class AMDGPUPreloadKernargHeaderLegacy : public MachineFunctionPass {
+public:
+  static char ID;
+
+  AMDGPUPreloadKernargHeaderLegacy() : MachineFunctionPass(ID) {}
+
+  StringRef getPassName() const override {
+    return "AMDGPU Preload Kernarg Header";
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+};
+
+} // end anonymous namespace
+
+char AMDGPUPreloadKernargHeaderLegacy::ID = 0;
+
+INITIALIZE_PASS(AMDGPUPreloadKernargHeaderLegacy, DEBUG_TYPE,
+                "AMDGPU Preload Kernarg Header", false, false)
+
+char &llvm::AMDGPUPreloadKernargHeaderLegacyID =
+    AMDGPUPreloadKernargHeaderLegacy::ID;
+
+FunctionPass *llvm::createAMDGPUPreloadKernargHeaderLegacyPass() {
+  return new AMDGPUPreloadKernargHeaderLegacy();
+}
+
+bool AMDGPUPreloadKernargHeaderLegacy::runOnMachineFunction(
+    MachineFunction &MF) {
+  return AMDGPUPreloadKernargHeader(MF).run();
+}
+
+AMDGPUPreloadKernargHeader::AMDGPUPreloadKernargHeader(MachineFunction &MF)
+    : MF(MF), ST(MF.getSubtarget<GCNSubtarget>()),
+      MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(*ST.getInstrInfo()),
+      TRI(*ST.getRegisterInfo()) {}
+
+bool AMDGPUPreloadKernargHeader::run() {
+  if (!ST.hasKernargPreload())
+    return false;
+
+  unsigned NumPreloadSGPRs = MFI.getNumKernargPreloadedSGPRs();
+  if (NumPreloadSGPRs <= 0)
+    return false;
+
+  if (MF.begin() == MF.end())
+    return false;
+
+  createBackCompatBlock();
+
+  return true;
+}
+
+void AMDGPUPreloadKernargHeader::createBackCompatBlock() {
+  auto KernelEntryMBB = MF.begin();
+  MachineBasicBlock *BackCompatMBB = MF.CreateMachineBasicBlock();
+  MF.insert(KernelEntryMBB, BackCompatMBB);
+  BackCompatMBB->addSuccessor(&*KernelEntryMBB);
+
+  assert(MFI.getUserSGPRInfo().hasKernargSegmentPtr());
+  Register KernargSegmentPtr = MFI.getArgInfo().KernargSegmentPtr.getRegister();
+  BackCompatMBB->addLiveIn(KernargSegmentPtr);
+
+  unsigned NumKernargPreloadSGPRs = MFI.getNumKernargPreloadedSGPRs();
+  unsigned NumInstrs = 0;
+
+  // Load kernel arguments to SGPRs
+  NumInstrs += addBackCompatLoads(BackCompatMBB, KernargSegmentPtr,
+                                  NumKernargPreloadSGPRs);
+
+  AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
+  unsigned Waitcnt =
+      AMDGPU::encodeWaitcnt(IV, getVmcntBitMask(IV), getExpcntBitMask(IV), 0);
+
+  // Wait for loads to complete
+  BuildMI(BackCompatMBB, DebugLoc(), TII.get(AMDGPU::S_WAITCNT))
+      .addImm(Waitcnt);
+  NumInstrs++;
+
+  // Set PC to the actual kernel entry point.  Add padding to fill out the rest
+  // of the backcompat block. The total number of bytes must be 256.
+  for (unsigned I = 0; I < 64 - NumInstrs; ++I) {
+    BuildMI(BackCompatMBB, DebugLoc(), TII.get(AMDGPU::S_BRANCH))
+        .addMBB(&*KernelEntryMBB);
+  }
+}
+
+// Find the largest possible load size that fits with SGPR alignment
+static LoadConfig getLoadParameters(const TargetRegisterInfo &TRI,
+                                    Register KernargPreloadSGPR,
+                                    unsigned NumKernargPreloadSGPRs) {
+  static const LoadConfig Configs[] = {
+      {8, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM},
+      {4, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM},
+      {2, &AMDGPU::SReg_64RegClass, AMDGPU::S_LOAD_DWORDX2_IMM},
+      {1, &AMDGPU::SReg_32RegClass, AMDGPU::S_LOAD_DWORD_IMM}};
+
+  // Find the largest possible load size
+  for (const auto &Config : Configs) {
+    if (NumKernargPreloadSGPRs >= Config.Size) {
+      Register LoadReg = TRI.getMatchingSuperReg(KernargPreloadSGPR,
+                                                 AMDGPU::sub0, Config.RegClass);
+      if (LoadReg != AMDGPU::NoRegister)
+        return LoadConfig(Config.Size, Config.RegClass, Config.Opcode, LoadReg);
+    }
+  }
+
+  // Fallback to a single register
+  return LoadConfig(1, &AMDGPU::SReg_32RegClass, AMDGPU::S_LOAD_DWORD_IMM,
+                    KernargPreloadSGPR);
+}
+
+unsigned AMDGPUPreloadKernargHeader::addBackCompatLoads(
+    MachineBasicBlock *BackCompatMBB, Register KernargSegmentPtr,
+    unsigned NumKernargPreloadSGPRs) {
+  Register KernargPreloadSGPR = MFI.getArgInfo().FirstKernArgPreloadReg;
+  unsigned Offset = 0;
+  unsigned NumLoads = 0;
+
+  // Fill all user SGPRs used for kernarg preloading with sequential data from
+  // the kernarg segment
+  while (NumKernargPreloadSGPRs > 0) {
+    LoadConfig Config =
+        getLoadParameters(TRI, KernargPreloadSGPR, NumKernargPreloadSGPRs);
+
+    BuildMI(BackCompatMBB, DebugLoc(), TII.get(Config.Opcode), Config.LoadReg)
+        .addReg(KernargSegmentPtr)
+        .addImm(Offset)
+        .addImm(0);
+
+    Offset += 4 * Config.Size;
+    KernargPreloadSGPR = KernargPreloadSGPR.asMCReg() + Config.Size;
+    NumKernargPreloadSGPRs -= Config.Size;
+    NumLoads++;
+  }
+
+  return NumLoads;
+}
+
+PreservedAnalyses
+AMDGPUPreloadKernargHeaderPass::run(MachineFunction &MF,
+                                    MachineFunctionAnalysisManager &) {
+  if (!AMDGPUPreloadKernargHeader(MF).run())
+    return PreservedAnalyses::all();
+
+  return PreservedAnalyses::none();
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.h b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.h
new file mode 100644
index 00000000000000..b0a3d065d562e0
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPreloadKernargHeader.h
@@ -0,0 +1,25 @@
+//===- AMDGPUPreloadKernargHeader.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_PRELOAD_KERNARG_HEADER_H
+#define LLVM_LIB_TARGET_AMDGPU_PRELOAD_KERNARG_HEADER_H
+
+#include "llvm/CodeGen/MachinePassManager.h"
+
+namespace llvm {
+
+class AMDGPUPreloadKernargHeaderPass
+    : public PassInfoMixin<AMDGPUPreloadKernargHeaderPass> {
+public:
+  PreservedAnalyses run(MachineFunction &MF,
+                        MachineFunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_PRELOAD_KERNARG_HEADER_H
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 34ad99dd980f27..854f5898303951 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -535,6 +535,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
   initializeGCNPreRALongBranchRegPass(*PR);
   initializeGCNRewritePartialRegUsesPass(*PR);
   initializeGCNRegPressurePrinterPass(*PR);
+  initializeAMDGPUPreloadKernargHeaderLegacyPass(*PR);
 }
 
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -1658,6 +1659,7 @@ void GCNPassConfig::addPreEmitPass() {
     addPass(&AMDGPUInsertDelayAluID);
 
   addPass(&BranchRelaxationPassID);
+  addPass(createAMDGPUPreloadKernargHeaderLegacyPass());
 }
 
 TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 68d141e338a882..87112ef4729a8a 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -88,6 +88,7 @@ add_llvm_target(AMDGPUCodeGen
   AMDGPUPerfHintAnalysis.cpp
   AMDGPUPostLegalizerCombiner.cpp
   AMDGPUPreLegalizerCombiner.cpp
+  AMDGPUPreloadKernargHeader.cpp
   AMDGPUPrintfRuntimeBinding.cpp
   AMDGPUPromoteAlloca.cpp
   AMDGPUPromoteKernelArguments.cpp
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
index ffde4d33f1341a..eccd77d6c00f0b 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -338,15 +338,6 @@ bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
   return true;
 }
 
-bool AMDGPUTargetAsmStreamer::EmitKernargPreloadHeader(
-    const MCSubtargetInfo &STI, bool TrapEnabled) {
-  OS << (TrapEnabled ? "\ts_trap 2" : "\ts_endpgm")
-     << " ; Kernarg preload header. Trap with incompatible firmware that "
-        "doesn't support preloading kernel arguments.\n";
-  OS << "\t.fill 63, 4, 0xbf800000 ; s_nop 0\n";
-  return true;
-}
-
 bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
   const uint32_t Encoded_s_code_end = 0xbf9f0000;
   const uint32_t Encoded_s_nop = 0xbf800000;
@@ -935,20 +926,6 @@ bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
   return true;
 }
 
-bool AMDGPUTargetELFStreamer::EmitKernargPreloadHeader(
-    const MCSubtargetInfo &STI, bool TrapEnabled) {
-  const uint32_t Encoded_s_nop = 0xbf800000;
-  const uint32_t Encoded_s_trap = 0xbf920002;
-  const uint32_t Encoded_s_endpgm = 0xbf810000;
-  const uint32_t TrapInstr = TrapEnabled ? Encoded_s_trap : Encoded_s_endpgm;
-  MCStreamer &OS = getStreamer();
-  OS.emitInt32(TrapInstr);
-  for (int i = 0; i < 63; ++i) {
-    OS.emitInt32(Encoded_s_nop);
-  }
-  return true;
-}
-
 bool AMDGPUTargetELFStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
   const uint32_t Encoded_s_code_end = 0xbf9f0000;
   const uint32_t Encoded_s_nop = 0xbf800000;
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
index 6a91ad06de5d12..9c490208505846 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
@@ -96,12 +96,6 @@ class AMDGPUTargetStreamer : public MCTargetStreamer {
   /// \returns True on success, false on failure.
   virtual bool EmitCodeEnd(const MCSubtargetInfo &STI) { return true; }
 
-  /// \returns True on success, false on failure.
-  virtual bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
-                                        bool TrapEnabled) {
-    return true;
-  }
-
   virtual void
   EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
                              const AMDGPU::MCKernelDescriptor &KernelDescriptor,
@@ -168,10 +162,6 @@ class AMDGPUTargetAsmStreamer final : public AMDGPUTargetStreamer {
   /// \returns True on success, false on failure.
   bool EmitCodeEnd(const MCSubtargetInfo &STI) override;
 
-  /// \returns True on success, false on failure.
-  bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
-                                bool TrapEnabled) override;
-
   void
   EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
                              const AMDGPU::MCKernelDescriptor &KernelDescriptor,
@@ -225,10 +215,6 @@ class AMDGPUTargetELFStreamer final : public AMDGPUTargetStreamer {
   /// \returns True on success, false on failure.
   bool EmitCodeEnd(const MCSubtargetInfo &STI) override;
 
-  /// \returns True on success, false on failure.
-  bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
-                                bool TrapEnabled) override;
-
   void
   EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
                              const AMDGPU::MCKernelDescriptor &KernelDescriptor,
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 1e43d2727a00da..a3402668237a58 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -259,6 +259,8 @@ SmallVectorImpl<MCRegister> *SIMachineFunctionInfo::addPreloadedKernArg(
   // If the available register tuples are aligned with the kernarg to be
   // preloaded use that register, otherwise we need to use a set of SGPRs and
   // merge them.
+  if (ArgInfo.FirstKernArgPreloadReg == AMDGPU::NoRegister)
+    ArgInfo.FirstKernArgPreloadReg = getNextUserSGPR();
   Register PreloadReg =
       TRI.getMatchingSuperReg(getNextUserSGPR(), AMDGPU::sub0, RC);
   if (PreloadReg &&
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index e77f4f69e265bb..950967aebd44a8 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -144,6 +144,7 @@
 ; GCN-O0-NEXT:        SI Final Branch Preparation
 ; GCN-O0-NEXT:        Post RA hazard recognizer
 ; GCN-O0-NEXT:        Branch relaxation pass
+; GCN-O0-NEXT:        AMDGPU Preload Kernarg Header
 ; GCN-O0-NEXT:        Register Usage Information Collector Pass
 ; GCN-O0-NEXT:        Remove Loads Into Fake Uses
 ; GCN-O0-NEXT:        Live DEBUG_VALUE analysis
@@ -424,6 +425,7 @@
 ; GCN-O1-NEXT:        Post RA hazard recognizer
 ; GCN-O1-NEXT:        AMDGPU Insert Delay ALU
 ; GCN-O1-NEXT:        Branch relaxation pass
+; GCN-O1-NEXT:        AMDGPU Preload Kernarg Header
 ; GCN-O1-NEXT:        Register Usage Information Collector Pass
 ; GCN-O1-NEXT:        Remove Loads Into Fake Uses
 ; GCN-O1-NEXT:        Live DEBUG_VALUE analysis
@@ -732,6 +734,7 @@
 ; GCN-O1-OPTS-NEXT:        Post RA hazard recognizer
 ; GCN-O1-OPTS-NEXT:        AMDGPU Insert Delay ALU
 ; GCN-O1-OPTS-NEXT:        Branch relaxation pass
+; GCN-O1-OPTS-NEXT:        AMDGPU Preload Kernarg Header
 ; GCN-O1-OPTS...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/119167


More information about the llvm-commits mailing list