[llvm] [AMDGPU] Move InferAddressSpacesPass to middle end optimization pipeline (PR #138604)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu May 29 12:15:02 PDT 2025


================
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -O1 -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck --check-prefix=INFER %s
+; RUN: opt -S -O2 -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck --check-prefix=INFER %s
+; RUN: opt -S -O3 -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck --check-prefix=INFER %s
+; RUN: opt -S -passes="lto<O1>" -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck --check-prefix=NO-INFER %s
+; RUN: opt -S -passes="lto<O2>" -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck --check-prefix=INFER %s
+; RUN: opt -S -passes="lto<O3>" -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck --check-prefix=INFER %s
+
+%struct.data = type { ptr, i32 }
+
+declare void @external_use(i32)
+
+define internal void @callee(ptr %ptr_as0, i32 %val) {
+; NO-INFER-LABEL: define internal void @callee(
+; NO-INFER-SAME: ptr [[PTR_AS0:%.*]], i32 [[VAL:%.*]]) #[[ATTR1:[0-9]+]] {
+; NO-INFER-NEXT:    [[LOADED:%.*]] = load i32, ptr [[PTR_AS0]], align 4
+; NO-INFER-NEXT:    [[COMPUTED:%.*]] = add i32 [[LOADED]], [[VAL]]
+; NO-INFER-NEXT:    store i32 [[COMPUTED]], ptr [[PTR_AS0]], align 4
+; NO-INFER-NEXT:    call void @external_use(i32 [[COMPUTED]])
+; NO-INFER-NEXT:    ret void
+;
+  %loaded = load i32, ptr %ptr_as0, align 4
+  %computed = add i32 %loaded, %val
+  store i32 %computed, ptr %ptr_as0, align 4
+  call void @external_use(i32 %computed)
+  ret void
+}
+
+define void @caller(ptr addrspace(1) %ptr_as1, i32 %value) {
+; INFER-LABEL: define void @caller(
+; INFER-SAME: ptr addrspace(1) captures(none) [[PTR_AS1:%.*]], i32 [[VALUE:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; INFER-NEXT:    [[LOADED_I:%.*]] = load i32, ptr addrspace(1) [[PTR_AS1]], align 4
+; INFER-NEXT:    [[COMPUTED_I:%.*]] = add i32 [[LOADED_I]], [[VALUE]]
+; INFER-NEXT:    store i32 [[COMPUTED_I]], ptr addrspace(1) [[PTR_AS1]], align 4
+; INFER-NEXT:    tail call void @external_use(i32 [[COMPUTED_I]])
+; INFER-NEXT:    ret void
+;
+; NO-INFER-LABEL: define void @caller(
+; NO-INFER-SAME: ptr addrspace(1) [[PTR_AS1:%.*]], i32 [[VALUE:%.*]]) #[[ATTR1]] {
+; NO-INFER-NEXT:    [[DATA:%.*]] = alloca [[STRUCT_DATA:%.*]], align 8, addrspace(5)
+; NO-INFER-NEXT:    [[VAL_FIELD:%.*]] = getelementptr [[STRUCT_DATA]], ptr addrspace(5) [[DATA]], i32 0, i32 1
+; NO-INFER-NEXT:    store i32 [[VALUE]], ptr addrspace(5) [[VAL_FIELD]], align 4
+; NO-INFER-NEXT:    [[GENERIC_INPUT:%.*]] = addrspacecast ptr addrspace(1) [[PTR_AS1]] to ptr
+; NO-INFER-NEXT:    store ptr [[GENERIC_INPUT]], ptr addrspace(5) [[DATA]], align 8
+; NO-INFER-NEXT:    [[RETRIEVED_PTR:%.*]] = load ptr, ptr addrspace(5) [[DATA]], align 8
+; NO-INFER-NEXT:    [[RETRIEVED_VAL:%.*]] = load i32, ptr addrspace(5) [[VAL_FIELD]], align 4
+; NO-INFER-NEXT:    call void @callee(ptr [[RETRIEVED_PTR]], i32 [[RETRIEVED_VAL]])
+; NO-INFER-NEXT:    ret void
+;
+  %data = alloca %struct.data, align 8, addrspace(5)
+  %ptr_field = getelementptr %struct.data, ptr addrspace(5) %data, i32 0, i32 0
+  %val_field = getelementptr %struct.data, ptr addrspace(5) %data, i32 0, i32 1
+  store i32 %value, ptr addrspace(5) %val_field, align 4
+  %generic_input = addrspacecast ptr addrspace(1) %ptr_as1 to ptr
+  store ptr %generic_input, ptr addrspace(5) %ptr_field, align 8
+  %retrieved_ptr = load ptr, ptr addrspace(5) %ptr_field, align 8
+  %retrieved_val = load i32, ptr addrspace(5) %val_field, align 4
+  call void @callee(ptr %retrieved_ptr, i32 %retrieved_val)
+  ret void
----------------
arsenm wrote:

The usual clang pattern is an alloca, an addrspacecast, a store of the flat pointer, then a reload and use. We should probably add a test case that looks like that.

https://github.com/llvm/llvm-project/pull/138604


More information about the llvm-commits mailing list