[llvm] AMDGPU: Make VarIndex a WeakVH in AMDGPUPromoteAlloca (PR #188662)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 25 18:53:19 PDT 2026
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-amdgpu
Author: Ruiling, Song (ruiling)
<details>
<summary>Changes</summary>
The VarIndex might come from another alloca (e.g. via a load) which may be promoted first. The value will be replaced in that case. WeakVH correctly handles this.
---
Full diff: https://github.com/llvm/llvm-project/pull/188662.diff
2 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (+1-1)
- (added) llvm/test/CodeGen/AMDGPU/promote-alloca-proper-value-replacement.ll (+29)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index ee9ba9f798443..c8ff65f06f2d4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -90,7 +90,7 @@ static cl::opt<unsigned>
// VarIndex is A, VarMul is stride, VarShift is shift and ConstIndex is B. All
// parts are optional.
struct GEPToVectorIndex {
- Value *VarIndex = nullptr; // defaults to 0
+ WeakVH VarIndex = nullptr; // defaults to 0
ConstantInt *VarMul = nullptr; // defaults to 1
ConstantInt *VarShift = nullptr; // defaults to 0
ConstantInt *ConstIndex = nullptr; // defaults to 0
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-proper-value-replacement.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-proper-value-replacement.ll
new file mode 100644
index 0000000000000..08e9904c185f9
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-proper-value-replacement.ll
@@ -0,0 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple=amdgcn-unknown-unknown -passes=amdgpu-promote-alloca < %s | FileCheck %s
+
+define void @alloca_value_cross_reference() {
+; CHECK-LABEL: define void @alloca_value_cross_reference() {
+; CHECK-NEXT: [[_ENTRY:.*:]]
+; CHECK-NEXT: [[HIT_ORDERED:%.*]] = freeze <4 x float> poison
+; CHECK-NEXT: [[HIT_INDEX:%.*]] = freeze <4 x i32> poison
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> [[HIT_INDEX]], i32 0, i32 0
+; CHECK-NEXT: br [[DOTLR_PH5:label %.*]]
+; CHECK: [[_LR_PH5:.*:]]
+; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[HIT_ORDERED]], float 0.000000e+00, i32 0
+; CHECK-NEXT: ret void
+;
+.entry:
+ %hit_ordered = alloca [4 x float], align 4, addrspace(5)
+ %hit_index = alloca [4 x i32], align 4, addrspace(5)
+ store i32 0, ptr addrspace(5) %hit_index, align 4
+ br label %.lr.ph5
+
+ ; The separate block is needed to avoid constant-folding on
+ ; the load from %hit_index.
+.lr.ph5:
+ %i = load i32, ptr addrspace(5) %hit_index, align 4
+ %p = getelementptr float, ptr addrspace(5) %hit_ordered, i32 %i
+ store float 0.000000e+00, ptr addrspace(5) %p, align 4
+ ret void
+}
``````````
</details>
https://github.com/llvm/llvm-project/pull/188662
More information about the llvm-commits
mailing list