[llvm] AMDGPU: Fix runtime unrolling when cascaded GEPs present (PR #147700)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 9 11:24:44 PDT 2025


https://github.com/macurtis-amd updated https://github.com/llvm/llvm-project/pull/147700

From 269757404be275cb3de8d5a69eb92700d9bfba78 Mon Sep 17 00:00:00 2001
From: Matthew Curtis <macurtis at amd.com>
Date: Wed, 9 Jul 2025 03:32:37 -0500
Subject: [PATCH 1/2] AMDGPU: Fix runtime unrolling when cascaded GEPs present

---
 .../AMDGPU/AMDGPUTargetTransformInfo.cpp      |  9 ++--
 .../LoopUnroll/AMDGPU/unroll-runtime.ll       | 44 +++++++++++++++++++
 2 files changed, 50 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll
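
The old check required the GEP's pointer operand to be a GlobalVariable or an
Argument directly, so a cascaded GEP, i.e. one whose pointer operand is itself
a (possibly constant-folded) GEP into the LDS global as in the new test below,
fell through to the "continue" and runtime unrolling was never enabled for the
loop. The fix looks through that chain with getUnderlyingObject() before
classifying the base object. A minimal sketch of the resulting check, assuming
a hypothetical standalone helper name (the in-tree code performs this check
inline in AMDGPUTTIImpl::getUnrollingPreferences):

  #include "llvm/Analysis/ValueTracking.h" // getUnderlyingObject()
  #include "llvm/IR/Argument.h"
  #include "llvm/IR/GlobalVariable.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // Returns true if the LDS access ultimately addresses a global variable or
  // a kernel argument, even when the GEP's pointer operand is itself another
  // (possibly constant-folded) GEP rather than the base object directly.
  static bool baseIsGlobalOrArgument(const GetElementPtrInst *GEP) {
    // getUnderlyingObject() looks through GEPs (including constant GEP
    // expressions), so a cascaded GEP no longer defeats the check.
    const Value *Base = getUnderlyingObject(GEP->getPointerOperand());
    return isa<GlobalVariable>(Base) || isa<Argument>(Base);
  }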

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index f693580929518..24f4df2aff9d1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -216,10 +216,13 @@ void AMDGPUTTIImpl::getUnrollingPreferences(
         // a variable, most likely we will be unable to combine it.
         // Do not unroll too deep inner loops for local memory to give a chance
         // to unroll an outer loop for a more important reason.
-        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
-            (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
-             !isa<Argument>(GEP->getPointerOperand())))
+        if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2)
           continue;
+
+        const Value *V = getUnderlyingObject(GEP->getPointerOperand());
+        if (!isa<GlobalVariable>(V) && !isa<Argument>(V))
+          continue;
+
         LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
                           << *L << " due to LDS use.\n");
         UP.Runtime = UnrollRuntimeLocal;
diff --git a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll
new file mode 100644
index 0000000000000..9414a1e24e542
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll
@@ -0,0 +1,44 @@
+; RUN: opt -mtriple=amdgcn-unknown-amdhsa -passes=loop-unroll -S %s | FileCheck %s
+
+%struct.wombat = type { %struct.zot, i32, [16 x i32], [16 x i32], i32, i32, [16 x i32], i32 }
+%struct.zot = type { i32, i32, [1024 x i32] }
+
+@global = external addrspace(3) global %struct.wombat
+
+; Ensure that a cascaded GEP for local address space does not inhibit unrolling
+;
+; CHECK-LABEL: @unroll_when_cascaded_gep
+; CHECK: bb:
+; CHECK:   br {{.*}}, label %bb2.unr-lcssa, label %bb.new
+; CHECK: bb.new:
+; CHECK:   %unroll_iter = 
+; CHECK:   br label %bb1
+; CHECK: bb1:
+; CHECK:   br {{.*}}, label %bb2.unr-lcssa.loopexit, label %bb1
+; CHECK: bb2.unr-lcssa.loopexit:
+; CHECK:   br label %bb2.unr-lcssa
+; CHECK: bb2.unr-lcssa:
+; CHECK:   br {{.*}}, label %bb1.epil.preheader, label %bb2
+; CHECK: bb1.epil.preheader:
+; CHECK:   br label %bb1.epil
+; CHECK: bb1.epil:
+; CHECK:   br {{.*}}, label %bb1.epil, label %bb2.epilog-lcssa
+; CHECK: bb2.epilog-lcssa:
+; CHECK:   br label %bb2
+; CHECK: bb2:
+; CHECK:   ret void
+define amdgpu_kernel void @unroll_when_cascaded_gep(i32 %arg) {
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %phi = phi i32 [ 0, %bb ], [ %add, %bb1 ]
+  %getelementptr = getelementptr [1024 x i32], ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @global, i32 8), i32 0, i32 0
+  %add = add i32 %phi, 1
+  %icmp = icmp eq i32 %phi, %arg
+  br i1 %icmp, label %bb2, label %bb1
+
+bb2:                                              ; preds = %bb1
+  ret void
+}
+

From e545559416f59540c6def4270251b8c126570f7e Mon Sep 17 00:00:00 2001
From: Matthew Curtis <macurtis at amd.com>
Date: Wed, 9 Jul 2025 13:24:12 -0500
Subject: [PATCH 2/2] fixup! AMDGPU: Fix runtime unrolling when cascaded GEPs
 present

---
 .../LoopUnroll/AMDGPU/unroll-runtime.ll       | 62 ++++++++++++-------
 1 file changed, 41 insertions(+), 21 deletions(-)

diff --git a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll
index 9414a1e24e542..aa3bd7cfa17ba 100644
--- a/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll
+++ b/llvm/test/Transforms/LoopUnroll/AMDGPU/unroll-runtime.ll
@@ -1,4 +1,5 @@
-; RUN: opt -mtriple=amdgcn-unknown-amdhsa -passes=loop-unroll -S %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=loop-unroll -S %s | FileCheck %s
 
 %struct.wombat = type { %struct.zot, i32, [16 x i32], [16 x i32], i32, i32, [16 x i32], i32 }
 %struct.zot = type { i32, i32, [1024 x i32] }
@@ -7,27 +8,46 @@
 
 ; Ensure that a cascaded GEP for local address space does not inhibit unrolling
 ;
-; CHECK-LABEL: @unroll_when_cascaded_gep
-; CHECK: bb:
-; CHECK:   br {{.*}}, label %bb2.unr-lcssa, label %bb.new
-; CHECK: bb.new:
-; CHECK:   %unroll_iter = 
-; CHECK:   br label %bb1
-; CHECK: bb1:
-; CHECK:   br {{.*}}, label %bb2.unr-lcssa.loopexit, label %bb1
-; CHECK: bb2.unr-lcssa.loopexit:
-; CHECK:   br label %bb2.unr-lcssa
-; CHECK: bb2.unr-lcssa:
-; CHECK:   br {{.*}}, label %bb1.epil.preheader, label %bb2
-; CHECK: bb1.epil.preheader:
-; CHECK:   br label %bb1.epil
-; CHECK: bb1.epil:
-; CHECK:   br {{.*}}, label %bb1.epil, label %bb2.epilog-lcssa
-; CHECK: bb2.epilog-lcssa:
-; CHECK:   br label %bb2
-; CHECK: bb2:
-; CHECK:   ret void
 define amdgpu_kernel void @unroll_when_cascaded_gep(i32 %arg) {
+; CHECK-LABEL: @unroll_when_cascaded_gep(
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[ARG:%.*]], 1
+; CHECK-NEXT:    [[XTRAITER:%.*]] = and i32 [[TMP0]], 7
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[ARG]], 7
+; CHECK-NEXT:    br i1 [[TMP1]], label [[BB2_UNR_LCSSA:%.*]], label [[BB_NEW:%.*]]
+; CHECK:       bb.new:
+; CHECK-NEXT:    [[UNROLL_ITER:%.*]] = sub i32 [[TMP0]], [[XTRAITER]]
+; CHECK-NEXT:    br label [[BB1:%.*]]
+; CHECK:       bb1:
+; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ 0, [[BB_NEW]] ], [ [[ADD_7:%.*]], [[BB1]] ]
+; CHECK-NEXT:    [[NITER:%.*]] = phi i32 [ 0, [[BB_NEW]] ], [ [[NITER_NEXT_7:%.*]], [[BB1]] ]
+; CHECK-NEXT:    [[ADD_7]] = add i32 [[PHI]], 8
+; CHECK-NEXT:    [[NITER_NEXT_7]] = add i32 [[NITER]], 8
+; CHECK-NEXT:    [[NITER_NCMP_7:%.*]] = icmp eq i32 [[NITER_NEXT_7]], [[UNROLL_ITER]]
+; CHECK-NEXT:    br i1 [[NITER_NCMP_7]], label [[BB2_UNR_LCSSA_LOOPEXIT:%.*]], label [[BB1]]
+; CHECK:       bb2.unr-lcssa.loopexit:
+; CHECK-NEXT:    [[PHI_UNR_PH:%.*]] = phi i32 [ [[ADD_7]], [[BB1]] ]
+; CHECK-NEXT:    br label [[BB2_UNR_LCSSA]]
+; CHECK:       bb2.unr-lcssa:
+; CHECK-NEXT:    [[PHI_UNR:%.*]] = phi i32 [ 0, [[BB:%.*]] ], [ [[PHI_UNR_PH]], [[BB2_UNR_LCSSA_LOOPEXIT]] ]
+; CHECK-NEXT:    [[LCMP_MOD:%.*]] = icmp ne i32 [[XTRAITER]], 0
+; CHECK-NEXT:    br i1 [[LCMP_MOD]], label [[BB1_EPIL_PREHEADER:%.*]], label [[BB2:%.*]]
+; CHECK:       bb1.epil.preheader:
+; CHECK-NEXT:    br label [[BB1_EPIL:%.*]]
+; CHECK:       bb1.epil:
+; CHECK-NEXT:    [[PHI_EPIL:%.*]] = phi i32 [ [[PHI_UNR]], [[BB1_EPIL_PREHEADER]] ], [ [[ADD_EPIL:%.*]], [[BB1_EPIL]] ]
+; CHECK-NEXT:    [[EPIL_ITER:%.*]] = phi i32 [ 0, [[BB1_EPIL_PREHEADER]] ], [ [[EPIL_ITER_NEXT:%.*]], [[BB1_EPIL]] ]
+; CHECK-NEXT:    [[GETELEMENTPTR_EPIL:%.*]] = getelementptr [1024 x i32], ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @global, i32 8), i32 0, i32 0
+; CHECK-NEXT:    [[ADD_EPIL]] = add i32 [[PHI_EPIL]], 1
+; CHECK-NEXT:    [[ICMP_EPIL:%.*]] = icmp eq i32 [[PHI_EPIL]], [[ARG]]
+; CHECK-NEXT:    [[EPIL_ITER_NEXT]] = add i32 [[EPIL_ITER]], 1
+; CHECK-NEXT:    [[EPIL_ITER_CMP:%.*]] = icmp ne i32 [[EPIL_ITER_NEXT]], [[XTRAITER]]
+; CHECK-NEXT:    br i1 [[EPIL_ITER_CMP]], label [[BB1_EPIL]], label [[BB2_EPILOG_LCSSA:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       bb2.epilog-lcssa:
+; CHECK-NEXT:    br label [[BB2]]
+; CHECK:       bb2:
+; CHECK-NEXT:    ret void
+;
 bb:
   br label %bb1
 


