[llvm] [Attributor] Add support for atomic operations in `AAAddressSpace` (PR #106927)

Shilei Tian via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 1 15:53:24 PDT 2024


https://github.com/shiltian created https://github.com/llvm/llvm-project/pull/106927

None

From 540ef5d58065164ed80e9c8c84d8df275cc6fd57 Mon Sep 17 00:00:00 2001
From: Shilei Tian <i at tianshilei.me>
Date: Sun, 1 Sep 2024 18:52:41 -0400
Subject: [PATCH] [Attributor] Add support for atomic operations in
 `AAAddressSpace`

---
 llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp   |   9 +-
 .../Transforms/IPO/AttributorAttributes.cpp   |   2 +
 llvm/test/CodeGen/AMDGPU/aa-as-infer.ll       | 164 ++++++++++++++++++
 3 files changed, 173 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/aa-as-infer.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index 72049f0aa6b86e..97d438d9703c23 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -1084,10 +1084,15 @@ static bool runImpl(Module &M, AnalysisGetter &AG, TargetMachine &TM,
       if (auto *LI = dyn_cast<LoadInst>(&I)) {
         A.getOrCreateAAFor<AAAddressSpace>(
             IRPosition::value(*LI->getPointerOperand()));
-      }
-      if (auto *SI = dyn_cast<StoreInst>(&I)) {
+      } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
         A.getOrCreateAAFor<AAAddressSpace>(
             IRPosition::value(*SI->getPointerOperand()));
+      } else if (auto *AI = dyn_cast<AtomicRMWInst>(&I)) {
+        A.getOrCreateAAFor<AAAddressSpace>(
+            IRPosition::value(*AI->getPointerOperand()));
+      } else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I)) {
+        A.getOrCreateAAFor<AAAddressSpace>(
+            IRPosition::value(*CmpX->getPointerOperand()));
       }
     }
   }
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 69d29b6c042349..cf23bd57b5a670 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -12588,6 +12588,8 @@ struct AAAddressSpaceImpl : public AAAddressSpace {
         if (U.getOperandNo() == 1)
           MakeChange(Inst, const_cast<Use &>(U));
       }
+      if (isa<AtomicRMWInst>(Inst) || isa<AtomicCmpXchgInst>(Inst))
+        MakeChange(Inst, const_cast<Use &>(U));
       return true;
     };
 
diff --git a/llvm/test/CodeGen/AMDGPU/aa-as-infer.ll b/llvm/test/CodeGen/AMDGPU/aa-as-infer.ll
new file mode 100644
index 00000000000000..f244011863eec7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/aa-as-infer.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor -S %s -o - | FileCheck %s
+
+ at g1 = protected addrspace(1) externally_initialized global i32 0, align 4
+ at g2 = protected addrspace(1) externally_initialized global i32 0, align 4
+
+define internal void @can_infer_cmpxchg(ptr %word) {
+; CHECK-LABEL: define internal void @can_infer_cmpxchg(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_0:%.*]] = cmpxchg ptr addrspace(1) [[TMP1]], i32 0, i32 4 monotonic monotonic, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_1:%.*]] = cmpxchg ptr addrspace(1) [[TMP2]], i32 0, i32 5 acq_rel monotonic, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_2:%.*]] = cmpxchg ptr addrspace(1) [[TMP3]], i32 0, i32 6 acquire monotonic, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_3:%.*]] = cmpxchg ptr addrspace(1) [[TMP4]], i32 0, i32 7 release monotonic, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_4:%.*]] = cmpxchg ptr addrspace(1) [[TMP5]], i32 0, i32 8 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_5:%.*]] = cmpxchg weak ptr addrspace(1) [[TMP6]], i32 0, i32 9 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr addrspace(1) [[TMP7]], i32 0, i32 10 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr addrspace(1) [[TMP8]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
+  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
+  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
+  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
+  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
+  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
+  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
+  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+  ret void
+}
+
+define internal void @can_not_infer_cmpxchg(ptr %word) {
+; CHECK-LABEL: define internal void @can_not_infer_cmpxchg(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[CMPXCHG_0:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 4 monotonic monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_1:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 5 acq_rel monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_2:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 6 acquire monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_3:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 7 release monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_4:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 8 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_5:%.*]] = cmpxchg weak ptr [[WORD]], i32 0, i32 9 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr [[WORD]], i32 0, i32 10 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr [[WORD]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
+  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
+  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
+  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
+  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
+  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
+  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
+  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
+  ret void
+}
+
+define internal void @can_infer_atomicrmw(ptr %word) {
+; CHECK-LABEL: define internal void @can_infer_atomicrmw(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr addrspace(1) [[TMP1]], i32 12 monotonic, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr addrspace(1) [[TMP2]], i32 13 monotonic, align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr addrspace(1) [[TMP3]], i32 14 monotonic, align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr addrspace(1) [[TMP4]], i32 15 monotonic, align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr addrspace(1) [[TMP5]], i32 16 monotonic, align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr addrspace(1) [[TMP6]], i32 17 monotonic, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr addrspace(1) [[TMP7]], i32 18 monotonic, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr addrspace(1) [[TMP8]], i32 19 monotonic, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr addrspace(1) [[TMP9]], i32 20 monotonic, align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr addrspace(1) [[TMP10]], i32 21 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
+; CHECK-NEXT:    [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr addrspace(1) [[TMP11]], i32 22 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
+  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
+  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
+  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
+  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
+  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
+  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
+  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
+  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
+  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
+  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
+  ret void
+}
+
+define internal void @can_not_infer_atomicrmw(ptr %word) {
+; CHECK-LABEL: define internal void @can_not_infer_atomicrmw(
+; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr [[WORD]], i32 12 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr [[WORD]], i32 13 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr [[WORD]], i32 14 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr [[WORD]], i32 15 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr [[WORD]], i32 16 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr [[WORD]], i32 17 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr [[WORD]], i32 18 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr [[WORD]], i32 19 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr [[WORD]], i32 20 monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr [[WORD]], i32 21 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr [[WORD]], i32 22 syncscope("singlethread") monotonic, align 4
+; CHECK-NEXT:    ret void
+;
+  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
+  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
+  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
+  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
+  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
+  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
+  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
+  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
+  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
+  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
+  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
+  ret void
+}
+
+define void @foo(ptr addrspace(3) %val) {
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: ptr addrspace(3) [[VAL:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:    [[VAL_CAST:%.*]] = addrspacecast ptr addrspace(3) [[VAL]] to ptr
+; CHECK-NEXT:    call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr [[VAL_CAST]])
+; CHECK-NEXT:    call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
+; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr [[VAL_CAST]])
+; CHECK-NEXT:    ret void
+;
+  %g1.cast = addrspacecast ptr addrspace(1) @g1 to ptr
+  %g2.cast = addrspacecast ptr addrspace(1) @g2 to ptr
+  %val.cast = addrspacecast ptr addrspace(3) %val to ptr
+  call void @can_infer_cmpxchg(ptr %g1.cast)
+  call void @can_infer_cmpxchg(ptr %g2.cast)
+  call void @can_not_infer_cmpxchg(ptr %g1.cast)
+  call void @can_not_infer_cmpxchg(ptr %g2.cast)
+  call void @can_not_infer_cmpxchg(ptr %val.cast)
+  call void @can_infer_atomicrmw(ptr %g1.cast)
+  call void @can_infer_atomicrmw(ptr %g2.cast)
+  call void @can_not_infer_atomicrmw(ptr %g1.cast)
+  call void @can_not_infer_atomicrmw(ptr %g2.cast)
+  call void @can_not_infer_atomicrmw(ptr %val.cast)
+  ret void
+}
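For reference, a minimal sketch (not taken from the patch; the names %flat, %cast, and %rmw are illustrative) of the rewrite that tracking atomic pointer operands in AAAddressSpace enables, assuming the attributor can prove %flat only ever holds a global (addrspace(1)) pointer:

  ; before
  %rmw = atomicrmw add ptr %flat, i32 1 monotonic, align 4

  ; after AAAddressSpace rewrites the pointer operand
  %cast = addrspacecast ptr %flat to ptr addrspace(1)
  %rmw = atomicrmw add ptr addrspace(1) %cast, i32 1 monotonic, align 4

cmpxchg is handled the same way, mirroring the existing treatment of load and store pointer operands in the test above.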


