[llvm] [AMDGPU][Attributor] Infer `inreg` attribute in `AMDGPUAttributor` (PR #101609)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 15 09:33:12 PDT 2024


================
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
+; RUN: opt -S -mtriple=amdgcn-unknown-unknown -passes=amdgpu-attributor %s -o - | FileCheck %s
+
+@g1 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g2 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g3 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g4 = protected addrspace(1) externally_initialized global i32 0, align 4
+
+;.
+; CHECK: @g1 = protected addrspace(1) externally_initialized global i32 0, align 4
+; CHECK: @g2 = protected addrspace(1) externally_initialized global i32 0, align 4
+; CHECK: @g3 = protected addrspace(1) externally_initialized global i32 0, align 4
+; CHECK: @g4 = protected addrspace(1) externally_initialized global i32 0, align 4
+;.
+define internal fastcc void @callee_infer(ptr addrspace(1) %x, i32 %y) {
+; CHECK-LABEL: define {{[^@]+}}@callee_infer
+; CHECK-SAME: (ptr addrspace(1) inreg [[X:%.*]], i32 inreg [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X_VAL:%.*]] = load i32, ptr addrspace(1) [[X]], align 4
+; CHECK-NEXT:    store i32 [[X_VAL]], ptr addrspace(1) @g3, align 4
+; CHECK-NEXT:    store i32 [[Y]], ptr addrspace(1) @g4, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %x.val = load i32, ptr addrspace(1) %x, align 4
+  store i32 %x.val, ptr addrspace(1) @g3, align 4
+  store i32 %y, ptr addrspace(1) @g4, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @kernel_infer(ptr addrspace(1) %p1, ptr addrspace(1) %p2, i32 %x) {
+; CHECK-LABEL: define {{[^@]+}}@kernel_infer
+; CHECK-SAME: (ptr addrspace(1) [[P1:%.*]], ptr addrspace(1) [[P2:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X]], 0
+; CHECK-NEXT:    [[P:%.*]] = select i1 [[CMP]], ptr addrspace(1) [[P1]], ptr addrspace(1) [[P2]]
+; CHECK-NEXT:    tail call fastcc void @callee_infer(ptr addrspace(1) @g1, i32 [[X]])
+; CHECK-NEXT:    tail call fastcc void @callee_infer(ptr addrspace(1) @g2, i32 [[X]])
+; CHECK-NEXT:    tail call fastcc void @callee_infer(ptr addrspace(1) @g1, i32 1)
+; CHECK-NEXT:    tail call fastcc void @callee_infer(ptr addrspace(1) @g2, i32 2)
+; CHECK-NEXT:    tail call fastcc void @callee_infer(ptr addrspace(1) [[P]], i32 [[X]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %cmp = icmp sgt i32 %x, 0
+  %p = select i1 %cmp, ptr addrspace(1) %p1, ptr addrspace(1) %p2
+  tail call fastcc void @callee_infer(ptr addrspace(1) @g1, i32 %x)
+  tail call fastcc void @callee_infer(ptr addrspace(1) @g2, i32 %x)
+  tail call fastcc void @callee_infer(ptr addrspace(1) @g1, i32 1)
+  tail call fastcc void @callee_infer(ptr addrspace(1) @g2, i32 2)
+  tail call fastcc void @callee_infer(ptr addrspace(1) %p, i32 %x)
+  ret void
+}
+
+define internal fastcc void @callee_not_infer(ptr addrspace(1) %x, i32 %y) {
+; CHECK-LABEL: define {{[^@]+}}@callee_not_infer
+; CHECK-SAME: (ptr addrspace(1) [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X_VAL:%.*]] = load i32, ptr addrspace(1) [[X]], align 4
+; CHECK-NEXT:    store i32 [[X_VAL]], ptr addrspace(1) @g3, align 4
+; CHECK-NEXT:    store i32 [[Y]], ptr addrspace(1) @g4, align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %x.val = load i32, ptr addrspace(1) %x, align 4
+  store i32 %x.val, ptr addrspace(1) @g3, align 4
+  store i32 %y, ptr addrspace(1) @g4, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @kernel_not_infer(ptr addrspace(1) %q, ptr addrspace(1) %p1, ptr addrspace(1) %p2) {
+; CHECK-LABEL: define {{[^@]+}}@kernel_not_infer
+; CHECK-SAME: (ptr addrspace(1) [[Q:%.*]], ptr addrspace(1) [[P1:%.*]], ptr addrspace(1) [[P2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ID_X:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i32, ptr addrspace(1) [[Q]], i32 [[ID_X]]
+; CHECK-NEXT:    [[D:%.*]] = load i32, ptr addrspace(1) [[GEP]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[D]], 0
+; CHECK-NEXT:    [[P:%.*]] = select i1 [[CMP]], ptr addrspace(1) [[P1]], ptr addrspace(1) [[P2]]
+; CHECK-NEXT:    tail call fastcc void @callee_not_infer(ptr addrspace(1) [[Q]], i32 [[ID_X]])
+; CHECK-NEXT:    tail call fastcc void @callee_not_infer(ptr addrspace(1) [[P]], i32 [[ID_X]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i32, ptr addrspace(1) %q, i32 %id.x
+  %d = load i32, ptr addrspace(1) %gep
+  %cmp = icmp sgt i32 %d, 0
+  %p = select i1 %cmp, ptr addrspace(1) %p1, ptr addrspace(1) %p2
+  tail call fastcc void @callee_not_infer(ptr addrspace(1) %q, i32 %id.x)
+  tail call fastcc void @callee_not_infer(ptr addrspace(1) %p, i32 %id.x)
+  ret void
+}
+;.
----------------
arsenm wrote:

i1 is always special. #72461 will eventually make it so that an i1 value lives in an SGPR but is still modeled as divergent. An i1 inreg argument, however, behaves differently: it is treated as uniform and promoted so that it is passed as a 32-bit value in an SGPR.
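
As a minimal sketch of the contrast (hypothetical functions, not part of this patch):

```llvm
; A plain i1 argument: after #72461 it would be assigned an SGPR at the
; ABI level, but it is still modeled as a divergent value.
define internal fastcc void @take_i1(i1 %flag) {
entry:
  ret void
}

; An i1 inreg argument: treated as uniform and promoted, so it is passed
; as a 32-bit value in an SGPR.
define internal fastcc void @take_i1_inreg(i1 inreg %flag) {
entry:
  ret void
}
```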

https://github.com/llvm/llvm-project/pull/101609

