[llvm] [AMDGPU] Infer Alignments after Inferring AddressSpaces (PR #69776)

via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 20 13:40:00 PDT 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-amdgpu

Author: Jeffrey Byrnes (jrbyrnes)

<details>
<summary>Changes</summary>

Since ValueTracking / getOrEnforceKnownAlignment gives up on AddrSpaceCasts, we may end up with suboptimal alignment in kernels that use them. InferAddressSpaces may eliminate these casts, so running InferAlignment afterwards can recover better alignments. This in turn enables certain LoadStoreVectorizer transformations.

This is a WIP while I investigate / work on the test changes, some of which seem a bit odd. For now, I am wondering whether there are any objections to the overall approach.


---

Patch is 153.52 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/69776.diff


17 Files Affected:

- (modified) llvm/include/llvm/InitializePasses.h (+1) 
- (modified) llvm/include/llvm/Transforms/Scalar/InferAlignment.h (+4) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (+6-1) 
- (modified) llvm/lib/Transforms/Scalar/InferAlignment.cpp (+40) 
- (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/implicit-kernarg-backend-usage-global-isel.ll (+37-61) 
- (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll (+91-32) 
- (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-sret.ll (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll (+5-5) 
- (modified) llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll (+4-4) 
- (modified) llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll (+12-25) 
- (modified) llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll (+34-34) 
- (modified) llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll (+6-14) 
- (modified) llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll (+259-214) 
- (modified) llvm/test/CodeGen/AMDGPU/implicit-kernarg-backend-usage.ll (+40-61) 
- (modified) llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll (+18-84) 
- (modified) llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll (+3-3) 


``````````diff
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index db653fff71ba95a..e0ce0f4a7cc78df 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -141,6 +141,7 @@ void initializeImmutableModuleSummaryIndexWrapperPassPass(PassRegistry&);
 void initializeImplicitNullChecksPass(PassRegistry&);
 void initializeIndirectBrExpandPassPass(PassRegistry&);
 void initializeInferAddressSpacesPass(PassRegistry&);
+void initializeInferAligmentsPass(PassRegistry &);
 void initializeInstCountLegacyPassPass(PassRegistry &);
 void initializeInstSimplifyLegacyPassPass(PassRegistry &);
 void initializeInstructionCombiningPassPass(PassRegistry&);
diff --git a/llvm/include/llvm/Transforms/Scalar/InferAlignment.h b/llvm/include/llvm/Transforms/Scalar/InferAlignment.h
index f6fc5f3b8d9872f..85de257045c5a37 100644
--- a/llvm/include/llvm/Transforms/Scalar/InferAlignment.h
+++ b/llvm/include/llvm/Transforms/Scalar/InferAlignment.h
@@ -18,10 +18,14 @@
 
 namespace llvm {
 
+class FunctionPass;
+
 struct InferAlignmentPass : public PassInfoMixin<InferAlignmentPass> {
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
 
+FunctionPass *createInferAlignmentsPass();
+
 } // namespace llvm
 
 #endif // LLVM_TRANSFORMS_SCALAR_INFERALIGNMENT_H
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index dc7321cd5de9fcd..0cd9de57903bc4f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -58,6 +58,7 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Scalar/GVN.h"
 #include "llvm/Transforms/Scalar/InferAddressSpaces.h"
+#include "llvm/Transforms/Scalar/InferAlignment.h"
 #include "llvm/Transforms/Utils.h"
 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
 #include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
@@ -1022,8 +1023,12 @@ void AMDGPUPassConfig::addIRPasses() {
   if (TM.getOptLevel() > CodeGenOptLevel::None)
     addPass(createAMDGPUAttributorPass());
 
-  if (TM.getOptLevel() > CodeGenOptLevel::None)
+  // Alignment Inference may be blocked by addrpacecasts on pointer operands,
+  // so rerun after AddressSpaces have been inferred.
+  if (TM.getOptLevel() > CodeGenOptLevel::None) {
     addPass(createInferAddressSpacesPass());
+    addPass(createInferAlignmentsPass());
+  }
 
   // Run atomic optimizer before Atomic Expand
   if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
diff --git a/llvm/lib/Transforms/Scalar/InferAlignment.cpp b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
index b75b8d486fbbe8b..c45eef4b69f96f9 100644
--- a/llvm/lib/Transforms/Scalar/InferAlignment.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAlignment.cpp
@@ -22,6 +22,8 @@
 
 using namespace llvm;
 
+#define DEBUG_TYPE "infer-alignment"
+
 static bool tryToImproveAlign(
     const DataLayout &DL, Instruction *I,
     function_ref<Align(Value *PtrOp, Align OldAlign, Align PrefAlign)> Fn) {
@@ -89,3 +91,41 @@ PreservedAnalyses InferAlignmentPass::run(Function &F,
   // Changes to alignment shouldn't invalidated analyses.
   return PreservedAnalyses::all();
 }
+
+class InferAligments : public FunctionPass {
+
+public:
+  static char ID;
+
+  InferAligments() : FunctionPass(ID) {
+    initializeInferAligmentsPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addPreserved<DominatorTreeWrapperPass>();
+    AU.addRequired<AssumptionCacheTracker>();
+  }
+
+  bool runOnFunction(Function &F) override;
+};
+
+char InferAligments::ID = 0;
+
+INITIALIZE_PASS_BEGIN(InferAligments, DEBUG_TYPE, "Infer Alignments", false,
+                      false)
+INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
+INITIALIZE_PASS_END(InferAligments, DEBUG_TYPE, "Infer Alignments", false,
+                    false)
+
+bool InferAligments::runOnFunction(Function &F) {
+  if (skipFunction(F))
+    return false;
+
+  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
+  return inferAlignment(
+      F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), *DT);
+}
+
+FunctionPass *llvm::createInferAlignmentsPass() { return new InferAligments(); }
\ No newline at end of file
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
index e6c835fa25406ad..94f6b1664407e17 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
@@ -953,7 +953,7 @@ define void @void_func_sret_struct_i8_i32(ptr addrspace(5) sret({ i8, i32 }) %ar
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (volatile load (s32) from `ptr addrspace(1) undef`, addrspace 1)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p5) = nuw G_PTR_ADD [[COPY]], [[C]](s32)
-  ; CHECK-NEXT:   G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store (s8) into %ir.arg0, addrspace 5)
+  ; CHECK-NEXT:   G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store (s8) into %ir.arg0, align 4, addrspace 5)
   ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.gep1, addrspace 5)
   ; CHECK-NEXT:   SI_RETURN
   %val0 = load volatile i8, ptr addrspace(1) undef
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/implicit-kernarg-backend-usage-global-isel.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/implicit-kernarg-backend-usage-global-isel.ll
index c25ecafa1f7c074..d9ccd1116d21621 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/implicit-kernarg-backend-usage-global-isel.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/implicit-kernarg-backend-usage-global-isel.ll
@@ -386,24 +386,14 @@ define amdgpu_kernel void @llvm_debugtrap() {
 define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr)  {
 ; GFX8V3-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX8V3:       ; %bb.0:
-; GFX8V3-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8V3-NEXT:    v_mov_b32_e32 v1, s7
-; GFX8V3-NEXT:    s_add_u32 s0, s8, 8
-; GFX8V3-NEXT:    flat_load_ubyte v0, v[0:1] glc
-; GFX8V3-NEXT:    s_addc_u32 s1, s9, 0
-; GFX8V3-NEXT:    s_waitcnt vmcnt(0)
-; GFX8V3-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8V3-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8V3-NEXT:    flat_load_ubyte v0, v[0:1] glc
-; GFX8V3-NEXT:    s_waitcnt vmcnt(0)
-; GFX8V3-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8V3-NEXT:    v_mov_b32_e32 v1, s5
-; GFX8V3-NEXT:    flat_load_ubyte v0, v[0:1] glc
+; GFX8V3-NEXT:    s_load_dword s0, s[6:7], 0x0
+; GFX8V3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8V3-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX8V3-NEXT:    s_waitcnt vmcnt(0)
+; GFX8V3-NEXT:    s_load_dword s2, s[8:9], 0x8
+; GFX8V3-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8V3-NEXT:    s_load_dword s2, s[4:5], 0x0
 ; GFX8V3-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX8V3-NEXT:    v_mov_b32_e32 v1, s11
-; GFX8V3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8V3-NEXT:    v_mov_b32_e32 v3, s1
 ; GFX8V3-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX8V3-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
@@ -412,24 +402,14 @@ define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr)  {
 ;
 ; GFX8V4-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX8V4:       ; %bb.0:
-; GFX8V4-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8V4-NEXT:    v_mov_b32_e32 v1, s7
-; GFX8V4-NEXT:    s_add_u32 s0, s8, 8
-; GFX8V4-NEXT:    flat_load_ubyte v0, v[0:1] glc
-; GFX8V4-NEXT:    s_addc_u32 s1, s9, 0
-; GFX8V4-NEXT:    s_waitcnt vmcnt(0)
-; GFX8V4-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8V4-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8V4-NEXT:    flat_load_ubyte v0, v[0:1] glc
-; GFX8V4-NEXT:    s_waitcnt vmcnt(0)
-; GFX8V4-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8V4-NEXT:    v_mov_b32_e32 v1, s5
-; GFX8V4-NEXT:    flat_load_ubyte v0, v[0:1] glc
+; GFX8V4-NEXT:    s_load_dword s0, s[6:7], 0x0
+; GFX8V4-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8V4-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX8V4-NEXT:    s_waitcnt vmcnt(0)
+; GFX8V4-NEXT:    s_load_dword s2, s[8:9], 0x8
+; GFX8V4-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8V4-NEXT:    s_load_dword s2, s[4:5], 0x0
 ; GFX8V4-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX8V4-NEXT:    v_mov_b32_e32 v1, s11
-; GFX8V4-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8V4-NEXT:    v_mov_b32_e32 v3, s1
 ; GFX8V4-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX8V4-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
@@ -438,22 +418,14 @@ define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr)  {
 ;
 ; GFX8V5-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX8V5:       ; %bb.0:
-; GFX8V5-NEXT:    s_add_u32 s0, s6, 8
-; GFX8V5-NEXT:    flat_load_ubyte v0, v[0:1] glc
-; GFX8V5-NEXT:    s_addc_u32 s1, s7, 0
-; GFX8V5-NEXT:    s_waitcnt vmcnt(0)
-; GFX8V5-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8V5-NEXT:    v_mov_b32_e32 v1, s1
-; GFX8V5-NEXT:    flat_load_ubyte v0, v[0:1] glc
-; GFX8V5-NEXT:    s_waitcnt vmcnt(0)
-; GFX8V5-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8V5-NEXT:    v_mov_b32_e32 v1, s5
-; GFX8V5-NEXT:    flat_load_ubyte v0, v[0:1] glc
+; GFX8V5-NEXT:    s_load_dword s0, s[0:1], 0x0
+; GFX8V5-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8V5-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
-; GFX8V5-NEXT:    s_waitcnt vmcnt(0)
+; GFX8V5-NEXT:    s_load_dword s2, s[6:7], 0x8
+; GFX8V5-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8V5-NEXT:    s_load_dword s2, s[4:5], 0x0
 ; GFX8V5-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX8V5-NEXT:    v_mov_b32_e32 v1, s9
-; GFX8V5-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8V5-NEXT:    v_mov_b32_e32 v3, s1
 ; GFX8V5-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX8V5-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
@@ -462,50 +434,54 @@ define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr)  {
 ;
 ; GFX9V3-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX9V3:       ; %bb.0:
-; GFX9V3-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9V3-NEXT:    global_load_ubyte v0, v2, s[6:7] glc
-; GFX9V3-NEXT:    global_load_ubyte v0, v2, s[8:9] offset:8 glc
-; GFX9V3-NEXT:    global_load_ubyte v0, v2, s[4:5] glc
+; GFX9V3-NEXT:    s_load_dword s0, s[6:7], 0x0
+; GFX9V3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9V3-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX9V3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9V3-NEXT:    s_load_dword s2, s[8:9], 0x8
+; GFX9V3-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9V3-NEXT:    s_load_dword s2, s[4:5], 0x0
 ; GFX9V3-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX9V3-NEXT:    v_mov_b32_e32 v1, s11
+; GFX9V3-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9V3-NEXT:    ; kill: killed $sgpr6_sgpr7
+; GFX9V3-NEXT:    ; kill: killed $sgpr8_sgpr9
 ; GFX9V3-NEXT:    ; kill: killed $sgpr4_sgpr5
-; GFX9V3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9V3-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9V3-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9V3-NEXT:    s_endpgm
 ;
 ; GFX9V4-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX9V4:       ; %bb.0:
-; GFX9V4-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9V4-NEXT:    global_load_ubyte v0, v2, s[6:7] glc
-; GFX9V4-NEXT:    global_load_ubyte v0, v2, s[8:9] offset:8 glc
-; GFX9V4-NEXT:    global_load_ubyte v0, v2, s[4:5] glc
+; GFX9V4-NEXT:    s_load_dword s0, s[6:7], 0x0
+; GFX9V4-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9V4-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX9V4-NEXT:    s_waitcnt vmcnt(0)
+; GFX9V4-NEXT:    s_load_dword s2, s[8:9], 0x8
+; GFX9V4-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9V4-NEXT:    s_load_dword s2, s[4:5], 0x0
 ; GFX9V4-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX9V4-NEXT:    v_mov_b32_e32 v1, s11
+; GFX9V4-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9V4-NEXT:    ; kill: killed $sgpr6_sgpr7
+; GFX9V4-NEXT:    ; kill: killed $sgpr8_sgpr9
 ; GFX9V4-NEXT:    ; kill: killed $sgpr4_sgpr5
-; GFX9V4-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9V4-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9V4-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9V4-NEXT:    s_endpgm
 ;
 ; GFX9V5-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX9V5:       ; %bb.0:
-; GFX9V5-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9V5-NEXT:    global_load_ubyte v0, v[0:1], off glc
+; GFX9V5-NEXT:    s_load_dword s0, s[0:1], 0x0
+; GFX9V5-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9V5-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
-; GFX9V5-NEXT:    global_load_ubyte v0, v2, s[6:7] offset:8 glc
-; GFX9V5-NEXT:    global_load_ubyte v0, v2, s[4:5] glc
-; GFX9V5-NEXT:    s_waitcnt vmcnt(0)
+; GFX9V5-NEXT:    s_load_dword s2, s[6:7], 0x8
+; GFX9V5-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9V5-NEXT:    s_load_dword s2, s[4:5], 0x0
 ; GFX9V5-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9V5-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9V5-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9V5-NEXT:    ; kill: killed $sgpr0_sgpr1
+; GFX9V5-NEXT:    ; kill: killed $sgpr6_sgpr7
 ; GFX9V5-NEXT:    ; kill: killed $sgpr4_sgpr5
-; GFX9V5-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9V5-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9V5-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9V5-NEXT:    s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll
index 6b2e6d8dfdb394c..637fac1cba3730a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll
@@ -17,6 +17,7 @@ define amdgpu_kernel void @i8_arg(ptr addrspace(1) nocapture %out, i8 %in) nounw
   ; HSA-VI-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
   ; HSA-VI-NEXT:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: i8_arg
   ; LEGACY-MESA-VI: bb.1 (%ir-block.0):
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -51,6 +52,7 @@ define amdgpu_kernel void @i8_zext_arg(ptr addrspace(1) nocapture %out, i8 zeroe
   ; HSA-VI-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
   ; HSA-VI-NEXT:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: i8_zext_arg
   ; LEGACY-MESA-VI: bb.1 (%ir-block.0):
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -85,6 +87,7 @@ define amdgpu_kernel void @i8_sext_arg(ptr addrspace(1) nocapture %out, i8 signe
   ; HSA-VI-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
   ; HSA-VI-NEXT:   G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: i8_sext_arg
   ; LEGACY-MESA-VI: bb.1 (%ir-block.0):
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -119,6 +122,7 @@ define amdgpu_kernel void @i16_arg(ptr addrspace(1) nocapture %out, i16 %in) nou
   ; HSA-VI-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
   ; HSA-VI-NEXT:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: i16_arg
   ; LEGACY-MESA-VI: bb.1 (%ir-block.0):
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -153,6 +157,7 @@ define amdgpu_kernel void @i16_zext_arg(ptr addrspace(1) nocapture %out, i16 zer
   ; HSA-VI-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
   ; HSA-VI-NEXT:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: i16_zext_arg
   ; LEGACY-MESA-VI: bb.1 (%ir-block.0):
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -187,6 +192,7 @@ define amdgpu_kernel void @i16_sext_arg(ptr addrspace(1) nocapture %out, i16 sig
   ; HSA-VI-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s16)
   ; HSA-VI-NEXT:   G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: i16_sext_arg
   ; LEGACY-MESA-VI: bb.1 (%ir-block.0):
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -220,6 +226,7 @@ define amdgpu_kernel void @i32_arg(ptr addrspace(1) nocapture %out, i32 %in) nou
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (s32), align 8, addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: i32_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -252,6 +259,7 @@ define amdgpu_kernel void @f32_arg(ptr addrspace(1) nocapture %out, float %in) n
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (s32), align 8, addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store (s32) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: f32_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -284,6 +292,7 @@ define amdgpu_kernel void @v2i8_arg(ptr addrspace(1) %out, <2 x i8> %in) {
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (<2 x s8>), align 8, addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](<2 x s8>), [[LOAD]](p1) :: (store (<2 x s8>) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: v2i8_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -316,6 +325,7 @@ define amdgpu_kernel void @v2i16_arg(ptr addrspace(1) %out, <2 x i16> %in) {
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (<2 x s16>), align 8, addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](<2 x s16>), [[LOAD]](p1) :: (store (<2 x s16>) into %ir.out, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: v2i16_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -348,6 +358,7 @@ define amdgpu_kernel void @v2i32_arg(ptr addrspace(1) nocapture %out, <2 x i32>
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (<2 x s32>), addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store (<2 x s32>) into %ir.out, align 4, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: v2i32_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -380,6 +391,7 @@ define amdgpu_kernel void @v2f32_arg(ptr addrspace(1) nocapture %out, <2 x float
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (<2 x s32>), addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store (<2 x s32>) into %ir.out, align 4, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: v2f32_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -412,6 +424,7 @@ define amdgpu_kernel void @v3i8_arg(ptr addrspace(1) nocapture %out, <3 x i8> %i
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (<3 x s8>), align 8, addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](<3 x s8>), [[LOAD]](p1) :: (store (<3 x s8>) into %ir.out, align 4, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: v3i8_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -444,6 +457,7 @@ define amdgpu_kernel void @v3i16_arg(ptr addrspace(1) nocapture %out, <3 x i16>
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (<3 x s16>), align 8, addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](<3 x s16>), [[LOAD]](p1) :: (store (<3 x s16>) into %ir.out, align 4, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: v3i16_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -476,6 +490,7 @@ define amdgpu_kernel void @v3i32_arg(ptr addrspace(1) nocapture %out, <3 x i32>
   ; HSA-VI-NEXT:   [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (<3 x s32>), align 16, addrspace 4)
   ; HSA-VI-NEXT:   G_STORE [[LOAD1]](<3 x s32>), [[LOAD]](p1) :: (store (<3 x s32>) into %ir.out, align 4, addrspace 1)
   ; HSA-VI-NEXT:   S_ENDPGM 0
+  ;
   ; LEGACY-MESA-VI-LABEL: name: v3i32_arg
   ; LEGACY-MESA-VI: bb.1.entry:
   ; LEGACY-MESA-VI-NEXT:   liveins: $sgpr0_sgpr1
@@ -508,6 +523,7 @@ define amdgpu_...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/69776


More information about the llvm-commits mailing list