[llvm] [AMDGPU][Attributor] Infer inreg attribute in `AMDGPUAttributor` (PR #146720)
Shilei Tian via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 2 08:17:18 PDT 2025
https://github.com/shiltian updated https://github.com/llvm/llvm-project/pull/146720
From a3a10e9253cdfa9c2f195d67b5a6eb60bfc9c167 Mon Sep 17 00:00:00 2001
From: Shilei Tian <i at tianshilei.me>
Date: Wed, 2 Jul 2025 11:14:59 -0400
Subject: [PATCH] [AMDGPU][Attributor] Infer inreg attribute in
`AMDGPUAttributor`
This patch introduces `AAAMDGPUUniformArgument`, which can infer the `inreg`
function argument attribute. The idea is that, for a function argument, if the
corresponding call site arguments are always uniform, we can mark the argument
as `inreg` and thus pass it via an SGPR.
In addition, this AA can also propagate the `inreg` attribute where feasible.
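For illustration, a minimal sketch of the intended effect (the names @g,
@callee, and @kernel are hypothetical and not taken from the patch; the
behavior mirrors the included test). Since the only call site of @callee
passes a kernel argument, which is always uniform, the attributor would mark
%y as `inreg`:

  @g = addrspace(1) global i32 0

  define internal void @callee(i32 %y) {
  entry:
    ; Every call site passes a uniform value, so after the pass runs this
    ; becomes: define internal void @callee(i32 inreg %y)
    store i32 %y, ptr addrspace(1) @g, align 4
    ret void
  }

  define amdgpu_kernel void @kernel(i32 %x) {
  entry:
    ; %x is a kernel entry argument and therefore uniform.
    call void @callee(i32 %x)
    ret void
  }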
---
llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp | 116 +++++++++++++++++-
.../test/CodeGen/AMDGPU/aa-inreg-inference.ll | 74 +++++++++++
2 files changed, 189 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/aa-inreg-inference.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index fef22c81c9391..fa54c80490602 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -14,6 +14,7 @@
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/CycleAnalysis.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
@@ -1295,6 +1296,114 @@ struct AAAMDGPUNoAGPR
const char AAAMDGPUNoAGPR::ID = 0;
+struct AAAMDGPUUniform : public StateWrapper<BooleanState, AbstractAttribute> {
+ using Base = StateWrapper<BooleanState, AbstractAttribute>;
+ AAAMDGPUUniform(const IRPosition &IRP, Attributor &A) : Base(IRP) {}
+
+ /// Create an abstract attribute view for the position \p IRP.
+ static AAAMDGPUUniform &createForPosition(const IRPosition &IRP,
+ Attributor &A);
+
+ /// See AbstractAttribute::getName()
+ StringRef getName() const override { return "AAAMDGPUUniform"; }
+
+ const std::string getAsStr(Attributor *A) const override {
+ return getAssumed() ? "uniform" : "divergent";
+ }
+
+ void trackStatistics() const override {}
+
+ /// See AbstractAttribute::getIdAddr()
+ const char *getIdAddr() const override { return &ID; }
+
+ /// This function should return true if the type of the \p AA is
+ /// AAAMDGPUUniform
+ static bool classof(const AbstractAttribute *AA) {
+ return (AA->getIdAddr() == &ID);
+ }
+
+ /// Unique ID (due to the unique address)
+ static const char ID;
+};
+
+const char AAAMDGPUUniform::ID = 0;
+
+/// This AA is to infer the inreg attribute for a function argument.
+struct AAAMDGPUUniformArgument : public AAAMDGPUUniform {
+ AAAMDGPUUniformArgument(const IRPosition &IRP, Attributor &A)
+ : AAAMDGPUUniform(IRP, A) {}
+
+ void initialize(Attributor &A) override {
+ Argument *Arg = getAssociatedArgument();
+ CallingConv::ID CC = Arg->getParent()->getCallingConv();
+ if (Arg->hasAttribute(Attribute::InReg)) {
+ indicateOptimisticFixpoint();
+ return;
+ }
+
+ if (AMDGPU::isEntryFunctionCC(CC)) {
+ // We only use isArgPassedInSGPR on kernel entry function arguments, so
+ // even if we were to use an SGPR for non-uniform i1 argument passing, it
+ // would not affect this.
+ if (AMDGPU::isArgPassedInSGPR(Arg))
+ indicateOptimisticFixpoint();
+ else
+ indicatePessimisticFixpoint();
+ }
+ }
+
+ ChangeStatus updateImpl(Attributor &A) override {
+ unsigned ArgNo = getAssociatedArgument()->getArgNo();
+ TargetMachine &TM =
+ static_cast<AMDGPUInformationCache &>(A.getInfoCache()).TM;
+
+ auto isUniform = [&](AbstractCallSite ACS) -> bool {
+ CallBase *CB = ACS.getInstruction();
+ Value *V = CB->getArgOperand(ArgNo);
+ if (auto *Arg = dyn_cast<Argument>(V)) {
+ auto *AA = A.getOrCreateAAFor<AAAMDGPUUniform>(
+ IRPosition::argument(*Arg), this, DepClassTy::REQUIRED);
+ return AA && AA->isValidState();
+ }
+ TargetTransformInfo TTI = TM.getTargetTransformInfo(*CB->getFunction());
+ return TTI.isAlwaysUniform(V);
+ };
+
+ bool UsedAssumedInformation = true;
+ if (!A.checkForAllCallSites(isUniform, *this, /*RequireAllCallSites=*/true,
+ UsedAssumedInformation))
+ return indicatePessimisticFixpoint();
+
+ if (!UsedAssumedInformation)
+ return indicateOptimisticFixpoint();
+
+ return ChangeStatus::UNCHANGED;
+ }
+
+ ChangeStatus manifest(Attributor &A) override {
+ Argument *Arg = getAssociatedArgument();
+ // If the argument already has the inreg attribute, there is nothing to do.
+ if (Arg->hasAttribute(Attribute::InReg))
+ return ChangeStatus::UNCHANGED;
+ if (AMDGPU::isEntryFunctionCC(Arg->getParent()->getCallingConv()))
+ return ChangeStatus::UNCHANGED;
+ LLVMContext &Ctx = Arg->getContext();
+ return A.manifestAttrs(getIRPosition(),
+ {Attribute::get(Ctx, Attribute::InReg)});
+ }
+};
+
+AAAMDGPUUniform &AAAMDGPUUniform::createForPosition(const IRPosition &IRP,
+ Attributor &A) {
+ switch (IRP.getPositionKind()) {
+ case IRPosition::IRP_ARGUMENT:
+ return *new (A.Allocator) AAAMDGPUUniformArgument(IRP, A);
+ default:
+ llvm_unreachable("not a valid position for AAAMDGPUUniform");
+ }
+}
+
/// Performs the final check and updates the 'amdgpu-waves-per-eu' attribute
/// based on the finalized 'amdgpu-flat-work-group-size' attribute.
/// Both attributes start with narrow ranges that expand during iteration.
@@ -1381,7 +1490,7 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
&AAAMDMaxNumWorkgroups::ID, &AAAMDWavesPerEU::ID, &AAAMDGPUNoAGPR::ID,
&AACallEdges::ID, &AAPointerInfo::ID, &AAPotentialConstantValues::ID,
&AAUnderlyingObjects::ID, &AAAddressSpace::ID, &AAIndirectCallInfo::ID,
- &AAInstanceInfo::ID});
+ &AAInstanceInfo::ID, &AAAMDGPUUniform::ID});
AttributorConfig AC(CGUpdater);
AC.IsClosedWorldModule = Options.IsClosedWorld;
@@ -1433,6 +1542,11 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
A.getOrCreateAAFor<AAAddressSpace>(
IRPosition::value(*CmpX->getPointerOperand()));
}
+
+ if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
+ for (auto &Arg : F->args())
+ A.getOrCreateAAFor<AAAMDGPUUniform>(IRPosition::argument(Arg));
+ }
}
}
diff --git a/llvm/test/CodeGen/AMDGPU/aa-inreg-inference.ll b/llvm/test/CodeGen/AMDGPU/aa-inreg-inference.ll
new file mode 100644
index 0000000000000..91dc5618b2989
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/aa-inreg-inference.ll
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor %s -o - | FileCheck %s
+
+@g1 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g2 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g3 = protected addrspace(1) externally_initialized global i32 0, align 4
+@g4 = protected addrspace(1) externally_initialized global i32 0, align 4
+
+define internal void @callee_with_always_uniform_argument(ptr addrspace(1) %x, i32 %y) {
+; CHECK-LABEL: define internal void @callee_with_always_uniform_argument(
+; CHECK-SAME: ptr addrspace(1) inreg [[X:%.*]], i32 inreg [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[X_VAL:%.*]] = load i32, ptr addrspace(1) [[X]], align 4
+; CHECK-NEXT: store i32 [[X_VAL]], ptr addrspace(1) @g3, align 4
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(1) @g4, align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %x.val = load i32, ptr addrspace(1) %x, align 4
+ store i32 %x.val, ptr addrspace(1) @g3, align 4
+ store i32 %y, ptr addrspace(1) @g4, align 4
+ ret void
+}
+
+define amdgpu_kernel void @kernel_with_readfirstlane(ptr addrspace(1) %p, i32 %x) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_with_readfirstlane(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[P0:%.*]] = call ptr addrspace(1) @llvm.amdgcn.readfirstlane.p1(ptr addrspace(1) [[P]])
+; CHECK-NEXT: call void @callee_with_always_uniform_argument(ptr addrspace(1) [[P0]], i32 [[X]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %p0 = call ptr addrspace(1) @llvm.amdgcn.readfirstlane.p1(ptr addrspace(1) %p)
+ call void @callee_with_always_uniform_argument(ptr addrspace(1) %p0, i32 %x)
+ ret void
+}
+
+define internal void @callee_without_always_uniform_argument(ptr addrspace(1) %x, i32 %y) {
+; CHECK-LABEL: define internal void @callee_without_always_uniform_argument(
+; CHECK-SAME: ptr addrspace(1) [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[X_VAL:%.*]] = load i32, ptr addrspace(1) [[X]], align 4
+; CHECK-NEXT: store i32 [[X_VAL]], ptr addrspace(1) @g3, align 4
+; CHECK-NEXT: store i32 [[Y]], ptr addrspace(1) @g4, align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %x.val = load i32, ptr addrspace(1) %x, align 4
+ store i32 %x.val, ptr addrspace(1) @g3, align 4
+ store i32 %y, ptr addrspace(1) @g4, align 4
+ ret void
+}
+
+define amdgpu_kernel void @kernel_with_divergent_callsite_argument(ptr addrspace(1) %p, i32 %x) {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_with_divergent_callsite_argument(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[ID_X:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr addrspace(1) [[P]], i32 [[ID_X]]
+; CHECK-NEXT: [[D:%.*]] = load i32, ptr addrspace(1) [[GEP]], align 4
+; CHECK-NEXT: call void @callee_without_always_uniform_argument(ptr addrspace(1) [[GEP]], i32 [[D]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr i32, ptr addrspace(1) %p, i32 %id.x
+ %d = load i32, ptr addrspace(1) %gep
+ call void @callee_without_always_uniform_argument(ptr addrspace(1) %gep, i32 %d)
+ ret void
+}
+
+declare ptr addrspace(1) @llvm.amdgcn.readfirstlane.p1(ptr addrspace(1))
+declare noundef i32 @llvm.amdgcn.workitem.id.x()