[llvm] [AMDGPU] Optimize block count calculations to the new ABI (PR #174112)

Joseph Huber via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 31 13:56:06 PST 2025


https://github.com/jhuber6 updated https://github.com/llvm/llvm-project/pull/174112

>From c4cd3afa7f8e394f13fea029a77f20b87b50d7e5 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Wed, 31 Dec 2025 15:08:01 -0600
Subject: [PATCH 1/2] [AMDGPU] Optimize block count calculations to the new ABI

Summary:
We already have a way to compute the block count the old way: read the
grid size from the dispatch packet and divide it by the workgroup size.
We did not want to add a new intrinsic that does the same thing, so this
optimization pattern matches that usage and automatically rewrites it to
the new form. This should improve the performance of old kernels by
converting the branchy computation into a simple index lookup and
removing the division.
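
For illustration, a minimal before/after sketch in LLVM IR (value names
are illustrative; the offsets follow the test below, with grid_size_x at
byte 12 of the dispatch packet and the hidden block count at byte 0 of
the code object v5 implicit arguments):

  ; Before: block count computed as grid_size_x / workgroup_size_x.
  %dp  = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
  %gsp = getelementptr i8, ptr addrspace(4) %dp, i32 12
  %gs  = load i32, ptr addrspace(4) %gsp
  %ia  = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %wsp = getelementptr i8, ptr addrspace(4) %ia, i32 12
  %ws  = load i16, ptr addrspace(4) %wsp
  %wsz = zext i16 %ws to i32
  %n   = udiv i32 %gs, %wsz

  ; After this pass (plus instcombine): a direct load of the hidden
  ; block count at offset 0 from the same implicitarg.ptr call.
  %n.new = load i32, ptr addrspace(4) %ia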
---
 .../AMDGPU/AMDGPULowerKernelAttributes.cpp    | 45 +++++++++++
 .../AMDGPU/implicit-arg-block-count.ll        | 77 +++++++++++++++++++
 2 files changed, 122 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp
index 248d7dcc9ec3e..5aeab9c0c0577 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelAttributes.cpp
@@ -19,6 +19,7 @@
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstIterator.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -111,6 +112,8 @@ static bool processUse(CallInst *CI, bool IsV5OrAbove) {
                                      /*Size=*/3, /*DefaultVal=*/0);
 
   if (!HasReqdWorkGroupSize && !HasUniformWorkGroupSize &&
+      !Intrinsic::getDeclarationIfExists(CI->getModule(),
+                                         Intrinsic::amdgcn_dispatch_ptr) &&
       none_of(MaxNumWorkgroups, [](unsigned X) { return X != 0; }))
     return false;
 
@@ -322,6 +325,48 @@ static bool processUse(CallInst *CI, bool IsV5OrAbove) {
     }
   }
 
+  // Upgrade the old method of computing the block count from the grid size.
+  // We pattern match any case where the implicit argument group size is the
+  // divisor of a dispatch packet grid size read of the same dimension.
+  if (IsV5OrAbove && llvm::any_of(GroupSizes, [](Value *V) { return V; })) {
+    for (int I = 0; I < 3; I++) {
+      Value *GroupSize = GroupSizes[I];
+      if (!GroupSize)
+        continue;
+
+      for (User *U : make_early_inc_range(GroupSize->users())) {
+        auto *Inst = cast<Instruction>(U);
+        if (isa<ZExtInst>(Inst))
+          Inst = Inst->getNextNode();
+
+        using namespace llvm::PatternMatch;
+        Value *Idx;
+        if (!match(Inst,
+                   m_UDiv(m_Load(m_GEP(
+                              m_Intrinsic<Intrinsic::amdgcn_dispatch_ptr>(),
+                              m_Value(Idx))),
+                          m_Value())))
+          continue;
+
+        ConstantInt *Offset = dyn_cast<ConstantInt>(Idx);
+        if (!Offset ||
+            Offset->getZExtValue() != GRID_SIZE_X + I * sizeof(uint32_t))
+          continue;
+
+        IRBuilder<> Builder(Inst);
+
+        Value *GEP = Builder.CreateConstGEP1_64(Builder.getInt8Ty(), CI,
+                                                HIDDEN_BLOCK_COUNT_X +
+                                                    I * sizeof(uint32_t));
+        Value *BlockCount = Builder.CreateLoad(Builder.getInt32Ty(), GEP);
+
+        Inst->replaceAllUsesWith(BlockCount);
+        Inst->eraseFromParent();
+        MadeChange = true;
+      }
+    }
+  }
+
   // If reqd_work_group_size is set, we can replace work group size with it.
   if (!HasReqdWorkGroupSize)
     return MadeChange;
diff --git a/llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll b/llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll
new file mode 100644
index 0000000000000..b515f27b70edd
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine,infer-alignment %s | FileCheck %s
+
+define i32 @num_blocks(i32 noundef %dim) {
+; CHECK-LABEL: define i32 @num_blocks(
+; CHECK-SAME: i32 noundef [[DIM:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    switch i32 [[DIM]], label %[[SW_DEFAULT_I:.*]] [
+; CHECK-NEXT:      i32 0, label %[[SW_BB_I:.*]]
+; CHECK-NEXT:      i32 1, label %[[SW_BB1_I:.*]]
+; CHECK-NEXT:      i32 2, label %[[SW_BB3_I:.*]]
+; CHECK-NEXT:    ]
+; CHECK:       [[SW_BB_I]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    br label %[[__GPU_NUM_BLOCKS_EXIT:.*]]
+; CHECK:       [[SW_BB1_I]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP1]], i64 4
+; CHECK-NEXT:    br label %[[__GPU_NUM_BLOCKS_EXIT]]
+; CHECK:       [[SW_BB3_I]]:
+; CHECK-NEXT:    [[TMP3:%.*]] = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP3]], i64 8
+; CHECK-NEXT:    br label %[[__GPU_NUM_BLOCKS_EXIT]]
+; CHECK:       [[SW_DEFAULT_I]]:
+; CHECK-NEXT:    unreachable
+; CHECK:       [[__GPU_NUM_BLOCKS_EXIT]]:
+; CHECK-NEXT:    [[RETVAL_0_I_IN:%.*]] = phi ptr addrspace(4) [ [[TMP0]], %[[SW_BB_I]] ], [ [[TMP2]], %[[SW_BB1_I]] ], [ [[TMP4]], %[[SW_BB3_I]] ]
+; CHECK-NEXT:    [[RETVAL_0_I:%.*]] = load i32, ptr addrspace(4) [[RETVAL_0_I_IN]], align 4
+; CHECK-NEXT:    ret i32 [[RETVAL_0_I]]
+;
+entry:
+  switch i32 %dim, label %sw.default.i [
+  i32 0, label %sw.bb.i
+  i32 1, label %sw.bb1.i
+  i32 2, label %sw.bb3.i
+  ]
+
+sw.bb.i:
+  %0 = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+  %1 = getelementptr i8, ptr addrspace(4) %0, i32 12
+  %2 = load i32, ptr addrspace(4) %1, align 4
+  %3 = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %4 = getelementptr i8, ptr addrspace(4) %3, i32 12
+  %5 = load i16, ptr addrspace(4) %4, align 2
+  %conv.i.i = zext i16 %5 to i32
+  %div.i.i = udiv i32 %2, %conv.i.i
+  br label %__gpu_num_blocks.exit
+
+sw.bb1.i:                                         ; preds = %entry
+  %6 = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+  %7 = getelementptr i8, ptr addrspace(4) %6, i32 16
+  %8 = load i32, ptr addrspace(4) %7, align 4
+  %9 = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %10 = getelementptr i8, ptr addrspace(4) %9, i32 14
+  %11 = load i16, ptr addrspace(4) %10, align 2
+  %conv.i1.i = zext i16 %11 to i32
+  %div.i2.i = udiv i32 %8, %conv.i1.i
+  br label %__gpu_num_blocks.exit
+
+sw.bb3.i:                                         ; preds = %entry
+  %12 = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+  %13 = getelementptr i8, ptr addrspace(4) %12, i32 20
+  %14 = load i32, ptr addrspace(4) %13, align 4
+  %15 = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %16 = getelementptr i8, ptr addrspace(4) %15, i32 16
+  %17 = load i16, ptr addrspace(4) %16, align 2
+  %conv.i3.i = zext i16 %17 to i32
+  %div.i4.i = udiv i32 %14, %conv.i3.i
+  br label %__gpu_num_blocks.exit
+
+sw.default.i:
+  unreachable
+
+__gpu_num_blocks.exit:
+  %retval.0.i = phi i32 [ %div.i.i, %sw.bb.i ], [ %div.i2.i, %sw.bb1.i ], [ %div.i4.i, %sw.bb3.i ]
+  ret i32 %retval.0.i
+}

>From abd952d6162550f7e4683925f325bee64a6fad20 Mon Sep 17 00:00:00 2001
From: Joseph Huber <huberjn at outlook.com>
Date: Wed, 31 Dec 2025 15:55:57 -0600
Subject: [PATCH 2/2] cleanup test

---
 .../AMDGPU/implicit-arg-block-count.ll        | 88 +++++++++----------
 1 file changed, 44 insertions(+), 44 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll b/llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll
index b515f27b70edd..be8bdd3d5d9da 100644
--- a/llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll
+++ b/llvm/test/CodeGen/AMDGPU/implicit-arg-block-count.ll
@@ -5,73 +5,73 @@ define i32 @num_blocks(i32 noundef %dim) {
 ; CHECK-LABEL: define i32 @num_blocks(
 ; CHECK-SAME: i32 noundef [[DIM:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    switch i32 [[DIM]], label %[[SW_DEFAULT_I:.*]] [
-; CHECK-NEXT:      i32 0, label %[[SW_BB_I:.*]]
-; CHECK-NEXT:      i32 1, label %[[SW_BB1_I:.*]]
-; CHECK-NEXT:      i32 2, label %[[SW_BB3_I:.*]]
+; CHECK-NEXT:    switch i32 [[DIM]], label %[[DEFAULT:.*]] [
+; CHECK-NEXT:      i32 0, label %[[DIM_X:.*]]
+; CHECK-NEXT:      i32 1, label %[[DIM_Y:.*]]
+; CHECK-NEXT:      i32 2, label %[[DIM_Z:.*]]
 ; CHECK-NEXT:    ]
-; CHECK:       [[SW_BB_I]]:
-; CHECK-NEXT:    [[TMP0:%.*]] = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    br label %[[__GPU_NUM_BLOCKS_EXIT:.*]]
-; CHECK:       [[SW_BB1_I]]:
-; CHECK-NEXT:    [[TMP1:%.*]] = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP1]], i64 4
-; CHECK-NEXT:    br label %[[__GPU_NUM_BLOCKS_EXIT]]
-; CHECK:       [[SW_BB3_I]]:
-; CHECK-NEXT:    [[TMP3:%.*]] = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP3]], i64 8
-; CHECK-NEXT:    br label %[[__GPU_NUM_BLOCKS_EXIT]]
-; CHECK:       [[SW_DEFAULT_I]]:
+; CHECK:       [[DIM_X]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    br label %[[EXIT:.*]]
+; CHECK:       [[DIM_Y]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr addrspace(4) [[TMP1]], i64 4
+; CHECK-NEXT:    br label %[[EXIT]]
+; CHECK:       [[DIM_Z]]:
+; CHECK-NEXT:    [[TMP3:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i8, ptr addrspace(4) [[TMP3]], i64 8
+; CHECK-NEXT:    br label %[[EXIT]]
+; CHECK:       [[DEFAULT]]:
 ; CHECK-NEXT:    unreachable
-; CHECK:       [[__GPU_NUM_BLOCKS_EXIT]]:
-; CHECK-NEXT:    [[RETVAL_0_I_IN:%.*]] = phi ptr addrspace(4) [ [[TMP0]], %[[SW_BB_I]] ], [ [[TMP2]], %[[SW_BB1_I]] ], [ [[TMP4]], %[[SW_BB3_I]] ]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[RETVAL_0_I_IN:%.*]] = phi ptr addrspace(4) [ [[TMP0]], %[[DIM_X]] ], [ [[TMP2]], %[[DIM_Y]] ], [ [[TMP4]], %[[DIM_Z]] ]
 ; CHECK-NEXT:    [[RETVAL_0_I:%.*]] = load i32, ptr addrspace(4) [[RETVAL_0_I_IN]], align 4
 ; CHECK-NEXT:    ret i32 [[RETVAL_0_I]]
 ;
 entry:
-  switch i32 %dim, label %sw.default.i [
-  i32 0, label %sw.bb.i
-  i32 1, label %sw.bb1.i
-  i32 2, label %sw.bb3.i
+  switch i32 %dim, label %default [
+  i32 0, label %dim_x
+  i32 1, label %dim_y
+  i32 2, label %dim_z
   ]
 
-sw.bb.i:
-  %0 = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+dim_x:
+  %0 = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
   %1 = getelementptr i8, ptr addrspace(4) %0, i32 12
   %2 = load i32, ptr addrspace(4) %1, align 4
-  %3 = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %3 = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
   %4 = getelementptr i8, ptr addrspace(4) %3, i32 12
   %5 = load i16, ptr addrspace(4) %4, align 2
-  %conv.i.i = zext i16 %5 to i32
-  %div.i.i = udiv i32 %2, %conv.i.i
-  br label %__gpu_num_blocks.exit
+  %conv_x = zext i16 %5 to i32
+  %count_x = udiv i32 %2, %conv_x
+  br label %exit
 
-sw.bb1.i:                                         ; preds = %entry
-  %6 = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+dim_y:
+  %6 = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
   %7 = getelementptr i8, ptr addrspace(4) %6, i32 16
   %8 = load i32, ptr addrspace(4) %7, align 4
-  %9 = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %9 = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
   %10 = getelementptr i8, ptr addrspace(4) %9, i32 14
   %11 = load i16, ptr addrspace(4) %10, align 2
-  %conv.i1.i = zext i16 %11 to i32
-  %div.i2.i = udiv i32 %8, %conv.i1.i
-  br label %__gpu_num_blocks.exit
+  %conv_y = zext i16 %11 to i32
+  %count_y = udiv i32 %8, %conv_y
+  br label %exit
 
-sw.bb3.i:                                         ; preds = %entry
-  %12 = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+dim_z:
+  %12 = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
   %13 = getelementptr i8, ptr addrspace(4) %12, i32 20
   %14 = load i32, ptr addrspace(4) %13, align 4
-  %15 = call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %15 = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
   %16 = getelementptr i8, ptr addrspace(4) %15, i32 16
   %17 = load i16, ptr addrspace(4) %16, align 2
-  %conv.i3.i = zext i16 %17 to i32
-  %div.i4.i = udiv i32 %14, %conv.i3.i
-  br label %__gpu_num_blocks.exit
+  %conv_z = zext i16 %17 to i32
+  %count_z = udiv i32 %14, %conv_z
+  br label %exit
 
-sw.default.i:
+default:
   unreachable
 
-__gpu_num_blocks.exit:
-  %retval.0.i = phi i32 [ %div.i.i, %sw.bb.i ], [ %div.i2.i, %sw.bb1.i ], [ %div.i4.i, %sw.bb3.i ]
-  ret i32 %retval.0.i
+exit:
+  %retval = phi i32 [ %count_x, %dim_x ], [ %count_y, %dim_y ], [ %count_z, %dim_z ]
+  ret i32 %retval
 }


