[llvm] [AMDGPU] Promote nested GEP allocas to vectors (PR #141199)
Harrison Hao via llvm-commits
llvm-commits at lists.llvm.org
Fri May 23 03:59:07 PDT 2025
https://github.com/harrisonGPU updated https://github.com/llvm/llvm-project/pull/141199
>From ffa8ed87232d03c0c394095387f2d1d518519271 Mon Sep 17 00:00:00 2001
From: Harrison Hao <tsworld1314 at gmail.com>
Date: Thu, 22 May 2025 14:42:37 +0800
Subject: [PATCH 1/6] [AMDGPU] Promote nested GEP allocas to vectors
---
.../lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp | 58 ++++++++++++++++++-
llvm/test/CodeGen/AMDGPU/amdpal.ll | 19 +++---
.../promote-alloca-vector-gep-of-gep.ll | 55 ++++++++++++++++++
3 files changed, 118 insertions(+), 14 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 517d05f6514d5..4355f195f2c88 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -437,8 +437,62 @@ static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
SmallMapVector<Value *, APInt, 4> VarOffsets;
APInt ConstOffset(BW, 0);
- if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
- !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
+
+ // Walk backwards through nested GEPs to collect both constant and variable
+ // offsets, so that nested vector GEP chains can be lowered in one step.
+ //
+ // Given this IR fragment as input:
+ //
+ // %0 = alloca [10 x <2 x i32>], align 8, addrspace(5)
+ // %1 = getelementptr [10 x <2 x i32>], ptr addrspace(5) %0, i32 0, i32 %j
+ // %2 = getelementptr i8, ptr addrspace(5) %1, i32 4
+ // %3 = load i32, ptr addrspace(5) %2, align 4
+ //
+ // Combine both GEP operations in a single pass, producing:
+ // BasePtr = %0
+ // ConstOffset = 4
+ // VarOffsets = { %j → element_size(<2 x i32>) }
+ //
+ // That lets us emit a single buffer_load directly into a VGPR, without ever
+ // allocating scratch memory for the intermediate pointer.
+ Value *CurPtr = GEP;
+ while (auto *CurGEP = dyn_cast<GetElementPtrInst>(CurPtr)) {
+ SmallMapVector<Value *, APInt, 4> LocalVarsOffsets;
+ APInt LocalConstOffset(BW, 0);
+
+ if (!CurGEP->collectOffset(DL, BW, LocalVarsOffsets, LocalConstOffset))
+ return nullptr;
+
+ // Merge any variable-index contributions into the accumulated VarOffsets
+ // map.
+ // Only a single pointer variable is allowed in the entire GEP chain.
+ // If VarOffsets already holds a different pointer, abort.
+ //
+ // Example:
+ // Suppose LocalVarsOffsets = { (%ptr → 4) } from this GEP, and
+ // VarOffsets already has { (%ptr → 8) } from an inner GEP.
+ // After this loop, VarOffsets should become { (%ptr → 12) }.
+ for (auto &VarEntry : LocalVarsOffsets) {
+ // If VarOffsets already records a different pointer, abort.
+ if (!VarOffsets.empty() && !VarOffsets.count(VarEntry.first))
+ return nullptr;
+
+ // Look up whether we’ve seen this pointer before.
+ auto *Existing = VarOffsets.find(VarEntry.first);
+ if (Existing == VarOffsets.end())
+ VarOffsets.insert({VarEntry.first, VarEntry.second});
+ else
+ Existing->second += VarEntry.second;
+ }
+
+ ConstOffset += LocalConstOffset;
+
+ // Move to the next outer pointer
+ CurPtr = CurGEP->getPointerOperand()->stripPointerCasts();
+ }
+
+ // Only proceed if this GEP stems from the same alloca.
+ if (CurPtr->stripPointerCasts() != Alloca)
return nullptr;
unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
diff --git a/llvm/test/CodeGen/AMDGPU/amdpal.ll b/llvm/test/CodeGen/AMDGPU/amdpal.ll
index 2e47b0163aa8c..695e80a7cc0a6 100644
--- a/llvm/test/CodeGen/AMDGPU/amdpal.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdpal.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=amdgcn--amdpal -mcpu=tahiti | FileCheck --check-prefixes=PAL,CI --enable-var-scope %s
-; RUN: llc < %s -mtriple=amdgcn--amdpal -mcpu=tonga | FileCheck --check-prefixes=PAL,VI --enable-var-scope %s
+; RUN: llc < %s -mtriple=amdgcn--amdpal -mcpu=tahiti | FileCheck --check-prefixes=PAL --enable-var-scope %s
+; RUN: llc < %s -mtriple=amdgcn--amdpal -mcpu=tonga | FileCheck --check-prefixes=PAL --enable-var-scope %s
; PAL-NOT: .AMDGPU.config
; PAL-LABEL: {{^}}simple:
@@ -51,17 +51,12 @@ entry:
ret void
}
-; Check code sequence for amdpal use of scratch for alloca in a compute shader.
-; The scratch descriptor is loaded from offset 0x10 of the GIT, rather than offset
-; 0 in a graphics shader.
-; Prior to GCN3 s_load_dword offsets are dwords, so the offset will be 0x4.
+; After the change that promotes the alloca to a vector (GEP-of-GEP
+; promotion), no scratch buffer is needed, so the descriptor load should
+; disappear.
; PAL-LABEL: {{^}}scratch2_cs:
-; PAL: s_movk_i32 s{{[0-9]+}}, 0x1234
-; PAL: s_mov_b32 s[[GITPTR:[0-9]+]], s0
-; CI: s_load_dwordx4 s[[[SCRATCHDESC:[0-9]+]]:{{[0-9]+]}}, s[[[GITPTR]]:{{[0-9]+\]}}, 0x4
-; VI: s_load_dwordx4 s[[[SCRATCHDESC:[0-9]+]]:{{[0-9]+]}}, s[[[GITPTR]]:{{[0-9]+\]}}, 0x10
-; PAL: buffer_store{{.*}}, s[[[SCRATCHDESC]]:
+; PAL: buffer_store{{.*}}, s[[[SCRATCHDESC:[0-9]+]]:{{[0-9]+]}}
define amdgpu_cs void @scratch2_cs(i32 inreg, i32 inreg, i32 inreg, <3 x i32> inreg, i32 inreg, <3 x i32> %coord, <2 x i32> %in, i32 %extra, i32 %idx) #0 {
entry:
@@ -88,6 +83,6 @@ declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), i32,
; PAL-NEXT: .cs:
; PAL-NEXT: .entry_point: _amdgpu_cs_main
; PAL-NEXT: .entry_point_symbol: scratch2_cs
-; PAL-NEXT: .scratch_memory_size: 0x10
+; PAL-NEXT: .scratch_memory_size: 0
; PAL-NEXT: .sgpr_count: 0x
; PAL-NEXT: .vgpr_count: 0x
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
new file mode 100644
index 0000000000000..40a81d04d09c8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca < %s | FileCheck %s
+target triple = "amdgcn-amd-amdhsa"
+define amdgpu_ps void @scalar_alloca_ptr_with_vector_gep_of_gep(i32 %j) #0 {
+; CHECK-LABEL: define amdgpu_ps void @scalar_alloca_ptr_with_vector_gep_of_gep(
+; CHECK-SAME: i32 [[J:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[SORTEDFRAGMENTS:%.*]] = freeze <20 x i32> poison
+; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[J]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[J]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 1, [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <20 x i32> [[SORTEDFRAGMENTS]], i32 [[TMP2]]
+; CHECK-NEXT: ret void
+;
+entry:
+ %SortedFragments = alloca [10 x <2 x i32>], align 8, addrspace(5)
+ %0 = getelementptr [10 x <2 x i32>], ptr addrspace(5) %SortedFragments, i32 0, i32 %j
+ %1 = getelementptr i8, ptr addrspace(5) %0, i32 4
+ %2 = load i32, ptr addrspace(5) %1, align 4
+ ret void
+}
+
+attributes #0 = { "amdgpu-promote-alloca-to-vector-max-regs"="32" }
+
+define amdgpu_cs void @scalar_alloca_ptr_with_vector_gep_of_scratch(i32 inreg, i32 inreg, i32 inreg, <3 x i32> inreg, i32 inreg, <3 x i32> %coord, <2 x i32> %in, i32 %extra, i32 %idx) #1 {
+; CHECK-LABEL: define amdgpu_cs void @scalar_alloca_ptr_with_vector_gep_of_scratch(
+; CHECK-SAME: i32 inreg [[TMP0:%.*]], i32 inreg [[TMP1:%.*]], i32 inreg [[TMP2:%.*]], <3 x i32> inreg [[TMP3:%.*]], i32 inreg [[TMP4:%.*]], <3 x i32> [[COORD:%.*]], <2 x i32> [[IN:%.*]], i32 [[EXTRA:%.*]], i32 [[IDX:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[V:%.*]] = freeze <3 x i32> poison
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <3 x i32> [[V]], i32 [[EXTRA]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i32> [[IN]], i64 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <3 x i32> [[TMP5]], i32 [[TMP6]], i32 1
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i32> [[IN]], i64 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <3 x i32> [[TMP7]], i32 [[TMP8]], i32 2
+; CHECK-NEXT: [[TMP10:%.*]] = add i32 1, [[IDX]]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <3 x i32> [[TMP9]], i32 [[TMP10]]
+; CHECK-NEXT: [[XF:%.*]] = bitcast i32 [[TMP11]] to float
+; CHECK-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[XF]], ptr addrspace(8) poison, i32 0, i32 0, i32 0)
+; CHECK-NEXT: ret void
+;
+entry:
+ %v = alloca [3 x i32], addrspace(5)
+ %v1 = getelementptr [3 x i32], ptr addrspace(5) %v, i32 0, i32 1
+ store i32 %extra, ptr addrspace(5) %v
+ store <2 x i32> %in, ptr addrspace(5) %v1
+ %e = getelementptr [2 x i32], ptr addrspace(5) %v1, i32 0, i32 %idx
+ %x = load i32, ptr addrspace(5) %e
+ %xf = bitcast i32 %x to float
+ call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float %xf, ptr addrspace(8) poison, i32 0, i32 0, i32 0)
+ ret void
+}
+
+attributes #1 = { nounwind "amdgpu-git-ptr-high"="0x1234" }
+
+declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), i32, i32, i32 immarg)
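The offset folding added above can be summarised by this standalone sketch.
The free-standing helper name and signature are illustrative only; the real
logic lives inside GEPToVectorIndex, but it uses the same LLVM APIs as the
patch (collectOffset, try_emplace, stripPointerCasts):

// A condensed sketch of the folding loop; illustrative helper only, not the
// pass's actual entry point.
#include "llvm/ADT/MapVector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool foldGEPChainOffsets(GetElementPtrInst *GEP, AllocaInst *Alloca,
                                const DataLayout &DL,
                                SmallMapVector<Value *, APInt, 4> &VarOffsets,
                                APInt &ConstOffset) {
  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  ConstOffset = APInt(BW, 0);
  Value *CurPtr = GEP;
  // Walk from the access back towards the alloca, accumulating the constant
  // byte offset and the byte coefficient of each variable index.
  while (auto *CurGEP = dyn_cast<GetElementPtrInst>(CurPtr)) {
    SmallMapVector<Value *, APInt, 4> LocalVars;
    APInt LocalConst(BW, 0);
    if (!CurGEP->collectOffset(DL, BW, LocalVars, LocalConst))
      return false; // Non-analyzable index; give up.
    for (auto &[Var, Off] : LocalVars) {
      // Only a single variable index is allowed across the whole chain.
      if (!VarOffsets.empty() && !VarOffsets.contains(Var))
        return false;
      auto [It, Inserted] = VarOffsets.try_emplace(Var, Off);
      if (!Inserted)
        It->second += Off; // Same index seen again: sum its coefficients.
    }
    ConstOffset += LocalConst;
    CurPtr = CurGEP->getPointerOperand()->stripPointerCasts();
  }
  // The chain must bottom out at the alloca being promoted.
  return CurPtr == Alloca;
}

Because collectOffset already scales each variable index by the size of the
type it steps over, simply summing the per-GEP results gives the total byte
offset of the access relative to the alloca.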
>From 0996795ffa7cbc14f80faa83b1aab106afd2bf95 Mon Sep 17 00:00:00 2001
From: Harrison Hao <tsworld1314 at gmail.com>
Date: Fri, 23 May 2025 07:35:19 +0000
Subject: [PATCH 2/6] [AMDGPU] Update comments.
---
.../lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp | 23 ++++++++++++-------
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 4355f195f2c88..4f79b05180690 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -469,19 +469,26 @@ static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
// If VarOffsets already holds a different pointer, abort.
//
// Example:
- // Suppose LocalVarsOffsets = { (%ptr → 4) } from this GEP, and
- // VarOffsets already has { (%ptr → 8) } from an inner GEP.
- // After this loop, VarOffsets should become { (%ptr → 12) }.
+ // 1) First GEP picks the idx’th element (each element is 8 bytes):
+ // addr0 = base + idx * 8
+ //
+ // 2) Second GEP adds a fixed 4‐byte shift:
+ // addr1 = addr0 + 4
+ //
+ // To turn that into a 4‐byte “lane” index we divide by 4:
+ // lane = (idx * 8 + 4) / 4
+ // = idx * (8 / 4) + (4 / 4)
+ // = idx * 2 + 1
for (auto &VarEntry : LocalVarsOffsets) {
// If VarOffsets already records a different pointer, abort.
if (!VarOffsets.empty() && !VarOffsets.count(VarEntry.first))
return nullptr;
- // Look up whether we’ve seen this pointer before.
- auto *Existing = VarOffsets.find(VarEntry.first);
- if (Existing == VarOffsets.end())
- VarOffsets.insert({VarEntry.first, VarEntry.second});
- else
+ // Try to insert VarEntry.first with its offset; if that pointer is
+ // already in VarOffsets, add the new offset to the existing one.
+ auto [Existing, Inserted] =
+ VarOffsets.try_emplace(VarEntry.first, VarEntry.second);
+ if (!Inserted)
Existing->second += VarEntry.second;
}
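Relating the arithmetic above to the first test: %SortedFragments is
[10 x <2 x i32>], which the pass views as <20 x i32> with 4-byte lanes. The
folded byte offset of the load is 8 * %j + 4, so the lane index is
(8 * %j + 4) / 4 = 2 * %j + 1, which is exactly the mul i32 %j, 2 /
add i32 1, ... / extractelement <20 x i32> sequence in the CHECK lines.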
>From ceee25d0b30642d659cc14e72e6528fd6103e8a2 Mon Sep 17 00:00:00 2001
From: Harrison Hao <tsworld1314 at gmail.com>
Date: Fri, 23 May 2025 07:41:26 +0000
Subject: [PATCH 3/6] [AMDGPU] Use contains.
---
llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 4f79b05180690..e05b3781ca393 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -481,7 +481,7 @@ static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
// = idx * 2 + 1
for (auto &VarEntry : LocalVarsOffsets) {
// If VarOffsets already records a different pointer, abort.
- if (!VarOffsets.empty() && !VarOffsets.count(VarEntry.first))
+ if (!VarOffsets.empty() && !VarOffsets.contains(VarEntry.first))
return nullptr;
// Try to insert VarEntry.first with its offset; if that pointer is
>From a4a69bf3baf18205f95eb6ea0f148c542560df1a Mon Sep 17 00:00:00 2001
From: Harrison Hao <tsworld1314 at gmail.com>
Date: Fri, 23 May 2025 08:06:36 +0000
Subject: [PATCH 4/6] [AMDGPU] Update comments.
---
llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp | 12 ------------
1 file changed, 12 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index e05b3781ca393..ab4d5887123b3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -467,18 +467,6 @@ static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
// map.
// Only a single pointer variable is allowed in the entire GEP chain.
// If VarOffsets already holds a different pointer, abort.
- //
- // Example:
- // 1) First GEP picks the idx’th element (each element is 8 bytes):
- // addr0 = base + idx * 8
- //
- // 2) Second GEP adds a fixed 4‐byte shift:
- // addr1 = addr0 + 4
- //
- // To turn that into a 4‐byte “lane” index we divide by 4:
- // lane = (idx * 8 + 4) / 4
- // = idx * (8 / 4) + (4 / 4)
- // = idx * 2 + 1
for (auto &VarEntry : LocalVarsOffsets) {
// If VarOffsets already records a different pointer, abort.
if (!VarOffsets.empty() && !VarOffsets.contains(VarEntry.first))
>From 80b40c3cfd53b4d053cb05ae01b589b113705664 Mon Sep 17 00:00:00 2001
From: Harrison Hao <tsworld1314 at gmail.com>
Date: Fri, 23 May 2025 09:12:56 +0000
Subject: [PATCH 5/6] [AMDGPU] Update lit test.
---
llvm/test/CodeGen/AMDGPU/amdpal.ll | 8 ++++-
.../promote-alloca-vector-gep-of-gep.ll | 35 ++++++++-----------
2 files changed, 22 insertions(+), 21 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/amdpal.ll b/llvm/test/CodeGen/AMDGPU/amdpal.ll
index 695e80a7cc0a6..2a21427adebaa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdpal.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdpal.ll
@@ -56,7 +56,13 @@ entry:
; disappear.
; PAL-LABEL: {{^}}scratch2_cs:
-; PAL: buffer_store{{.*}}, s[[[SCRATCHDESC:[0-9]+]]:{{[0-9]+]}}
+; PAL-NEXT: ; %bb.0:
+; PAL-NEXT: v_add_i32_e32 v0, vcc, 1, v6
+; PAL-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; PAL-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
+; PAL-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0
+; PAL-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
+; PAL-NEXT: buffer_store{{.*}}, s[[[SCRATCHDESC]]:
define amdgpu_cs void @scratch2_cs(i32 inreg, i32 inreg, i32 inreg, <3 x i32> inreg, i32 inreg, <3 x i32> %coord, <2 x i32> %in, i32 %extra, i32 %idx) #0 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
index 40a81d04d09c8..99ed5ce0f86c3 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca < %s | FileCheck %s
-target triple = "amdgcn-amd-amdhsa"
define amdgpu_ps void @scalar_alloca_ptr_with_vector_gep_of_gep(i32 %j) #0 {
; CHECK-LABEL: define amdgpu_ps void @scalar_alloca_ptr_with_vector_gep_of_gep(
; CHECK-SAME: i32 [[J:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -14,28 +13,28 @@ define amdgpu_ps void @scalar_alloca_ptr_with_vector_gep_of_gep(i32 %j) #0 {
;
entry:
%SortedFragments = alloca [10 x <2 x i32>], align 8, addrspace(5)
- %0 = getelementptr [10 x <2 x i32>], ptr addrspace(5) %SortedFragments, i32 0, i32 %j
- %1 = getelementptr i8, ptr addrspace(5) %0, i32 4
- %2 = load i32, ptr addrspace(5) %1, align 4
+ %row = getelementptr [10 x <2 x i32>], ptr addrspace(5) %SortedFragments, i32 0, i32 %j
+ %elt1 = getelementptr i8, ptr addrspace(5) %row, i32 4
+ %val = load i32, ptr addrspace(5) %elt1, align 4
ret void
}
attributes #0 = { "amdgpu-promote-alloca-to-vector-max-regs"="32" }
-define amdgpu_cs void @scalar_alloca_ptr_with_vector_gep_of_scratch(i32 inreg, i32 inreg, i32 inreg, <3 x i32> inreg, i32 inreg, <3 x i32> %coord, <2 x i32> %in, i32 %extra, i32 %idx) #1 {
+define amdgpu_cs void @scalar_alloca_ptr_with_vector_gep_of_scratch(i32 inreg %0, i32 inreg %1, i32 inreg %2, <3 x i32> inreg %coord, i32 inreg %4, <3 x i32> %v_in, <2 x i32> %in, i32 %extra, i32 %idx, ptr addrspace(8) %buffer) #0 {
; CHECK-LABEL: define amdgpu_cs void @scalar_alloca_ptr_with_vector_gep_of_scratch(
-; CHECK-SAME: i32 inreg [[TMP0:%.*]], i32 inreg [[TMP1:%.*]], i32 inreg [[TMP2:%.*]], <3 x i32> inreg [[TMP3:%.*]], i32 inreg [[TMP4:%.*]], <3 x i32> [[COORD:%.*]], <2 x i32> [[IN:%.*]], i32 [[EXTRA:%.*]], i32 [[IDX:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: i32 inreg [[TMP0:%.*]], i32 inreg [[TMP1:%.*]], i32 inreg [[TMP2:%.*]], <3 x i32> inreg [[COORD:%.*]], i32 inreg [[TMP3:%.*]], <3 x i32> [[V_IN:%.*]], <2 x i32> [[IN:%.*]], i32 [[EXTRA:%.*]], i32 [[IDX:%.*]], ptr addrspace(8) [[BUFFER:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[V:%.*]] = freeze <3 x i32> poison
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <3 x i32> [[V]], i32 [[EXTRA]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i32> [[IN]], i64 0
-; CHECK-NEXT: [[TMP7:%.*]] = insertelement <3 x i32> [[TMP5]], i32 [[TMP6]], i32 1
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i32> [[IN]], i64 1
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <3 x i32> [[TMP7]], i32 [[TMP8]], i32 2
-; CHECK-NEXT: [[TMP10:%.*]] = add i32 1, [[IDX]]
-; CHECK-NEXT: [[TMP11:%.*]] = extractelement <3 x i32> [[TMP9]], i32 [[TMP10]]
-; CHECK-NEXT: [[XF:%.*]] = bitcast i32 [[TMP11]] to float
-; CHECK-NEXT: call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float [[XF]], ptr addrspace(8) poison, i32 0, i32 0, i32 0)
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <3 x i32> [[V]], i32 [[EXTRA]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[IN]], i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <3 x i32> [[TMP4]], i32 [[TMP5]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[IN]], i64 1
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <3 x i32> [[TMP6]], i32 [[TMP7]], i32 2
+; CHECK-NEXT: [[TMP9:%.*]] = add i32 1, [[IDX]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <3 x i32> [[TMP8]], i32 [[TMP9]]
+; CHECK-NEXT: [[XF:%.*]] = bitcast i32 [[TMP10]] to float
+; CHECK-NEXT: store float [[XF]], ptr addrspace(8) [[BUFFER]], align 4
; CHECK-NEXT: ret void
;
entry:
@@ -46,10 +45,6 @@ entry:
%e = getelementptr [2 x i32], ptr addrspace(5) %v1, i32 0, i32 %idx
%x = load i32, ptr addrspace(5) %e
%xf = bitcast i32 %x to float
- call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float %xf, ptr addrspace(8) poison, i32 0, i32 0, i32 0)
+ store float %xf, ptr addrspace(8) %buffer, align 4
ret void
}
-
-attributes #1 = { nounwind "amdgpu-git-ptr-high"="0x1234" }
-
-declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), i32, i32, i32 immarg)
>From c50146aa75b966715a56f18b1b59cbce19b1c6b2 Mon Sep 17 00:00:00 2001
From: Harrison Hao <tsworld1314 at gmail.com>
Date: Fri, 23 May 2025 10:58:44 +0000
Subject: [PATCH 6/6] [AMDGPU] Update code and test.
---
.../lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp | 9 ++---
.../promote-alloca-vector-gep-of-gep.ll | 39 ++++++++-----------
2 files changed, 20 insertions(+), 28 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index ab4d5887123b3..61760f093d7eb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -467,17 +467,16 @@ static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
// map.
// Only a single pointer variable is allowed in the entire GEP chain.
// If VarOffsets already holds a different pointer, abort.
- for (auto &VarEntry : LocalVarsOffsets) {
+ for (auto &[Var, Offset] : LocalVarsOffsets) {
// If VarOffsets already records a different pointer, abort.
- if (!VarOffsets.empty() && !VarOffsets.contains(VarEntry.first))
+ if (!VarOffsets.empty() && !VarOffsets.contains(Var))
return nullptr;
// Try to insert VarEntry.first with its offset; if that pointer is
// already in VarOffsets, add the new offset to the existing one.
- auto [Existing, Inserted] =
- VarOffsets.try_emplace(VarEntry.first, VarEntry.second);
+ auto [Existing, Inserted] = VarOffsets.try_emplace(Var, Offset);
if (!Inserted)
- Existing->second += VarEntry.second;
+ Existing->second += Offset;
}
ConstOffset += LocalConstOffset;
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
index 99ed5ce0f86c3..ecf1ea9709783 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-vector-gep-of-gep.ll
@@ -19,32 +19,25 @@ entry:
ret void
}
-attributes #0 = { "amdgpu-promote-alloca-to-vector-max-regs"="32" }
-
-define amdgpu_cs void @scalar_alloca_ptr_with_vector_gep_of_scratch(i32 inreg %0, i32 inreg %1, i32 inreg %2, <3 x i32> inreg %coord, i32 inreg %4, <3 x i32> %v_in, <2 x i32> %in, i32 %extra, i32 %idx, ptr addrspace(8) %buffer) #0 {
-; CHECK-LABEL: define amdgpu_cs void @scalar_alloca_ptr_with_vector_gep_of_scratch(
-; CHECK-SAME: i32 inreg [[TMP0:%.*]], i32 inreg [[TMP1:%.*]], i32 inreg [[TMP2:%.*]], <3 x i32> inreg [[COORD:%.*]], i32 inreg [[TMP3:%.*]], <3 x i32> [[V_IN:%.*]], <2 x i32> [[IN:%.*]], i32 [[EXTRA:%.*]], i32 [[IDX:%.*]], ptr addrspace(8) [[BUFFER:%.*]]) #[[ATTR0]] {
+define amdgpu_ps void @scalar_alloca_ptr_with_vector_gep_of_gep3(i32 %j) #0 {
+; CHECK-LABEL: define amdgpu_ps void @scalar_alloca_ptr_with_vector_gep_of_gep3(
+; CHECK-SAME: i32 [[J:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[V:%.*]] = freeze <3 x i32> poison
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <3 x i32> [[V]], i32 [[EXTRA]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[IN]], i64 0
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <3 x i32> [[TMP4]], i32 [[TMP5]], i32 1
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[IN]], i64 1
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <3 x i32> [[TMP6]], i32 [[TMP7]], i32 2
-; CHECK-NEXT: [[TMP9:%.*]] = add i32 1, [[IDX]]
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <3 x i32> [[TMP8]], i32 [[TMP9]]
-; CHECK-NEXT: [[XF:%.*]] = bitcast i32 [[TMP10]] to float
-; CHECK-NEXT: store float [[XF]], ptr addrspace(8) [[BUFFER]], align 4
+; CHECK-NEXT: [[SORTEDFRAGMENTS:%.*]] = freeze <16 x i32> poison
+; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[J]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 8, [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[J]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 9, [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <16 x i32> [[SORTEDFRAGMENTS]], i32 [[TMP3]]
; CHECK-NEXT: ret void
;
entry:
- %v = alloca [3 x i32], addrspace(5)
- %v1 = getelementptr [3 x i32], ptr addrspace(5) %v, i32 0, i32 1
- store i32 %extra, ptr addrspace(5) %v
- store <2 x i32> %in, ptr addrspace(5) %v1
- %e = getelementptr [2 x i32], ptr addrspace(5) %v1, i32 0, i32 %idx
- %x = load i32, ptr addrspace(5) %e
- %xf = bitcast i32 %x to float
- store float %xf, ptr addrspace(8) %buffer, align 4
+ %SortedFragments = alloca [2 x [4 x <2 x i32>]], align 8, addrspace(5)
+ %lvl1 = getelementptr inbounds [2 x [4 x <2 x i32>]], ptr addrspace(5) %SortedFragments, i32 0, i32 1
+ %lvl2 = getelementptr inbounds [4 x <2 x i32>], ptr addrspace(5) %lvl1, i32 0, i32 %j
+ %byte = getelementptr inbounds i8, ptr addrspace(5) %lvl2, i32 4
+ %val = load i32, ptr addrspace(5) %byte, align 4
ret void
}
+
+attributes #0 = { "amdgpu-promote-alloca-to-vector-max-regs"="32" }
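The same arithmetic for the three-level chain above: the
[2 x [4 x <2 x i32>]] alloca is viewed as <16 x i32> with 4-byte lanes;
%lvl1 contributes a constant 32 bytes, %lvl2 contributes 8 * %j bytes, and
the i8 GEP adds 4 more, so the loaded lane is (32 + 8 * %j + 4) / 4 =
2 * %j + 9, matching the add i32 9 and extractelement <16 x i32> in the
CHECK lines. The unused mul / add i32 8 pair appears to come from promoting
the intermediate %lvl2 GEP as well and is presumably left for later
dead-code elimination.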