[llvm] e6012c8 - AMDGPU: Handle private atomics

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Wed Apr 6 19:47:49 PDT 2022


Author: Matt Arsenault
Date: 2022-04-06T22:47:19-04:00
New Revision: e6012c8e0fbdf422325e66c96537de03d258deb7

URL: https://github.com/llvm/llvm-project/commit/e6012c8e0fbdf422325e66c96537de03d258deb7
DIFF: https://github.com/llvm/llvm-project/commit/e6012c8e0fbdf422325e66c96537de03d258deb7.diff

LOG: AMDGPU: Handle private atomics

Use the new NotAtomic expansion to turn these into the equivalent
non-atomic operations. Independent lanes cannot access the private
memory of other lanes, so there is no possibility of synchronization.
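
As a minimal sketch of what the expansion produces (the autogenerated
checks in the test diff below show the same pattern; value names here
are illustrative), an operation such as

  %old = atomicrmw add i32 addrspace(5)* %ptr, i32 4 seq_cst

becomes a plain read-modify-write sequence:

  %old = load i32, i32 addrspace(5)* %ptr, align 4
  %new = add i32 %old, 4
  store i32 %new, i32 addrspace(5)* %ptr, align 4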

These rarely appear directly in user code, but InferAddressSpaces can
introduce them after optimization.
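
A hypothetical example of how this arises: given a flat atomic on an
addrspacecast of an alloca,

  %alloca = alloca i32, addrspace(5)
  %flat = addrspacecast i32 addrspace(5)* %alloca to i32*
  %old = atomicrmw add i32* %flat, i32 1 seq_cst

InferAddressSpaces can rewrite the pointer operand to addrspace(5)
directly, leaving a private atomicrmw for the backend to lower.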

Fixes issues 54693 and 54274.

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.h
    llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9f5bbff428896..69c1643c63f5b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -12439,6 +12439,9 @@ static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) {
 
 TargetLowering::AtomicExpansionKind
 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+  unsigned AS = RMW->getPointerAddressSpace();
+  if (AS == AMDGPUAS::PRIVATE_ADDRESS)
+    return AtomicExpansionKind::NotAtomic;
 
   auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) {
     OptimizationRemarkEmitter ORE(RMW->getFunction());
@@ -12470,8 +12473,6 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
     if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy()))
       return AtomicExpansionKind::CmpXChg;
 
-    unsigned AS = RMW->getPointerAddressSpace();
-
     if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) &&
          Subtarget->hasAtomicFaddInsts()) {
       if (Subtarget->hasGFX940Insts())
@@ -12531,6 +12532,27 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
   return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
 }
 
+TargetLowering::AtomicExpansionKind
+SITargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+  return LI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
+             ? AtomicExpansionKind::NotAtomic
+             : AtomicExpansionKind::None;
+}
+
+TargetLowering::AtomicExpansionKind
+SITargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+  return SI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
+             ? AtomicExpansionKind::NotAtomic
+             : AtomicExpansionKind::None;
+}
+
+TargetLowering::AtomicExpansionKind
+SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
+  return CmpX->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS
+             ? AtomicExpansionKind::NotAtomic
+             : AtomicExpansionKind::None;
+}
+
 const TargetRegisterClass *
 SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
   const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false);

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 7468d4db0829e..2c9dc8722a923 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -469,6 +469,10 @@ class SITargetLowering final : public AMDGPUTargetLowering {
                                     bool SNaN = false,
                                     unsigned Depth = 0) const override;
   AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
+  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
+  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+  AtomicExpansionKind
+  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
 
   virtual const TargetRegisterClass *
   getRegClassFor(MVT VT, bool isDivergent) const override;

diff --git a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
index bf3b50ea04274..ff4ccfc60ad19 100644
--- a/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-memory-atomics.ll
@@ -1,32 +1,535 @@
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tahiti < %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=tahiti -atomic-expand < %s | FileCheck -check-prefix=IR %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti < %s | FileCheck -check-prefix=GCN %s
 
-; This works because promote allocas pass replaces these with LDS atomics.
+define i32 @load_atomic_private_seq_cst_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @load_atomic_private_seq_cst_i32(
+; IR-NEXT:    [[LOAD:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    ret i32 [[LOAD]]
+;
+; GCN-LABEL: load_atomic_private_seq_cst_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %load = load atomic i32, i32 addrspace(5)* %ptr seq_cst, align 4
+  ret i32 %load
+}
+
+define i64 @load_atomic_private_seq_cst_i64(i64 addrspace(5)* %ptr) {
+; IR-LABEL: @load_atomic_private_seq_cst_i64(
+; IR-NEXT:    [[LOAD:%.*]] = load i64, i64 addrspace(5)* [[PTR:%.*]], align 8
+; IR-NEXT:    ret i64 [[LOAD]]
+;
+; GCN-LABEL: load_atomic_private_seq_cst_i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 4, v0
+; GCN-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
+; GCN-NEXT:    buffer_load_dword v1, v1, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %load = load atomic i64, i64 addrspace(5)* %ptr seq_cst, align 8
+  ret i64 %load
+}
+
+define void @atomic_store_seq_cst_i32(i32 addrspace(5)* %ptr, i32 %val) {
+; IR-LABEL: @atomic_store_seq_cst_i32(
+; IR-NEXT:    store i32 [[VAL:%.*]], i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    ret void
+;
+; GCN-LABEL: atomic_store_seq_cst_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  store atomic i32 %val, i32 addrspace(5)* %ptr seq_cst, align 4
+  ret void
+}
+
+define void @atomic_store_seq_cst_i64(i64 addrspace(5)* %ptr, i64 %val) {
+; IR-LABEL: @atomic_store_seq_cst_i64(
+; IR-NEXT:    store i64 [[VAL:%.*]], i64 addrspace(5)* [[PTR:%.*]], align 8
+; IR-NEXT:    ret void
+;
+; GCN-LABEL: atomic_store_seq_cst_i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, 4, v0
+; GCN-NEXT:    buffer_store_dword v2, v3, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  store atomic i64 %val, i64 addrspace(5)* %ptr seq_cst, align 8
+  ret void
+}
+
+define i32 @load_atomic_private_seq_cst_syncscope_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @load_atomic_private_seq_cst_syncscope_i32(
+; IR-NEXT:    [[LOAD:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    ret i32 [[LOAD]]
+;
+; GCN-LABEL: load_atomic_private_seq_cst_syncscope_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %load = load atomic i32, i32 addrspace(5)* %ptr syncscope("agent") seq_cst, align 4
+  ret i32 %load
+}
+
+define void @atomic_store_seq_cst_syncscope_i32(i32 addrspace(5)* %ptr, i32 %val) {
+; IR-LABEL: @atomic_store_seq_cst_syncscope_i32(
+; IR-NEXT:    store i32 [[VAL:%.*]], i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    ret void
+;
+; GCN-LABEL: atomic_store_seq_cst_syncscope_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  store atomic i32 %val, i32 addrspace(5)* %ptr syncscope("agent") seq_cst, align 4
+  ret void
+}
+
+define i32 @cmpxchg_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @cmpxchg_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
+; IR-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 1, i32 [[TMP1]]
+; IR-NEXT:    store i32 [[TMP3]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    [[TMP4:%.*]] = insertvalue { i32, i1 } undef, i32 [[TMP1]], 0
+; IR-NEXT:    [[TMP5:%.*]] = insertvalue { i32, i1 } [[TMP4]], i1 [[TMP2]], 1
+; IR-NEXT:    [[RESULT_0:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; IR-NEXT:    [[RESULT_1:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; IR-NEXT:    store i1 [[RESULT_1]], i1 addrspace(1)* undef, align 1
+; IR-NEXT:    ret i32 [[RESULT_0]]
+;
+; GCN-LABEL: cmpxchg_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v1, 1, vcc
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GCN-NEXT:    buffer_store_byte v0, off, s[4:7], 0
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = cmpxchg i32 addrspace(5)* %ptr, i32 0, i32 1 acq_rel monotonic
+  %result.0 = extractvalue { i32, i1 } %result, 0
+  %result.1 = extractvalue { i32, i1 } %result, 1
+  store i1 %result.1, i1 addrspace(1)* undef
+  ret i32 %result.0
+}
+
+define i64 @cmpxchg_private_i64(i64 addrspace(5)* %ptr) {
+; IR-LABEL: @cmpxchg_private_i64(
+; IR-NEXT:    [[TMP1:%.*]] = load i64, i64 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0
+; IR-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i64 1, i64 [[TMP1]]
+; IR-NEXT:    store i64 [[TMP3]], i64 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    [[TMP4:%.*]] = insertvalue { i64, i1 } undef, i64 [[TMP1]], 0
+; IR-NEXT:    [[TMP5:%.*]] = insertvalue { i64, i1 } [[TMP4]], i1 [[TMP2]], 1
+; IR-NEXT:    [[RESULT_0:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; IR-NEXT:    [[RESULT_1:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; IR-NEXT:    store i1 [[RESULT_1]], i1 addrspace(1)* undef, align 1
+; IR-NEXT:    ret i64 [[RESULT_0]]
+;
+; GCN-LABEL: cmpxchg_private_i64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, v0
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, 4, v2
+; GCN-NEXT:    buffer_load_dword v1, v3, s[0:3], 0 offen
+; GCN-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v1, 0, vcc
+; GCN-NEXT:    buffer_store_dword v4, v3, s[0:3], 0 offen
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v0, 1, vcc
+; GCN-NEXT:    s_waitcnt expcnt(0)
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
+; GCN-NEXT:    buffer_store_dword v3, v2, s[0:3], 0 offen
+; GCN-NEXT:    buffer_store_byte v4, off, s[4:7], 0
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = cmpxchg i64 addrspace(5)* %ptr, i64 0, i64 1 acq_rel monotonic
+  %result.0 = extractvalue { i64, i1 } %result, 0
+  %result.1 = extractvalue { i64, i1 } %result, 1
+  store i1 %result.1, i1 addrspace(1)* undef
+  ret i64 %result.0
+}
+
+
+define i32 @atomicrmw_xchg_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_xchg_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    store i32 4, i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_xchg_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v2, 4
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(1)
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xchg i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_add_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_add_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = add i32 [[TMP1]], 4
+; IR-NEXT:    store i32 [[TMP2]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_add_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw add i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_sub_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_sub_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = sub i32 [[TMP1]], 4
+; IR-NEXT:    store i32 [[TMP2]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_sub_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, -4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw sub i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_and_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_and_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 4
+; IR-NEXT:    store i32 [[TMP2]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_and_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v2, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw and i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_nand_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_nand_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 4
+; IR-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP2]], -1
+; IR-NEXT:    store i32 [[TMP3]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_nand_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_not_b32_e32 v2, v1
+; GCN-NEXT:    v_or_b32_e32 v2, -5, v2
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw nand i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_or_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_or_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], 4
+; IR-NEXT:    store i32 [[TMP2]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_or_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v2, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw or i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_xor_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_xor_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP1]], 4
+; IR-NEXT:    store i32 [[TMP2]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_xor_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v2, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw xor i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_max_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_max_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 4
+; IR-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 4, i32 [[TMP1]]
+; IR-NEXT:    store i32 [[TMP3]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_max_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_max_i32_e32 v2, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw max i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_min_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_min_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 4
+; IR-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 4
+; IR-NEXT:    store i32 [[TMP3]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_min_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_min_i32_e32 v2, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw min i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_umax_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_umax_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 4
+; IR-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 4, i32 [[TMP1]]
+; IR-NEXT:    store i32 [[TMP3]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_umax_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_max_u32_e32 v2, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umax i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define i32 @atomicrmw_umin_private_i32(i32 addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_umin_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 4
+; IR-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 4
+; IR-NEXT:    store i32 [[TMP3]], i32 addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret i32 [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_umin_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_min_u32_e32 v2, 4, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw umin i32 addrspace(5)* %ptr, i32 4 seq_cst
+  ret i32 %result
+}
+
+define float @atomicrmw_fadd_private_i32(float addrspace(5)* %ptr) {
+; IR-LABEL: @atomicrmw_fadd_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load float, float addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = fadd float [[TMP1]], 2.000000e+00
+; IR-NEXT:    store float [[TMP2]], float addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret float [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_fadd_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_f32_e32 v2, 2.0, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v1
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw fadd float addrspace(5)* %ptr, float 2.0 seq_cst
+  ret float %result
+}
+
+define float @atomicrmw_fsub_private_i32(float addrspace(5)* %ptr, float %val) {
+; IR-LABEL: @atomicrmw_fsub_private_i32(
+; IR-NEXT:    [[TMP1:%.*]] = load float, float addrspace(5)* [[PTR:%.*]], align 4
+; IR-NEXT:    [[TMP2:%.*]] = fsub float [[TMP1]], [[VAL:%.*]]
+; IR-NEXT:    store float [[TMP2]], float addrspace(5)* [[PTR]], align 4
+; IR-NEXT:    ret float [[TMP1]]
+;
+; GCN-LABEL: atomicrmw_fsub_private_i32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_sub_f32_e32 v1, v2, v1
+; GCN-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; GCN-NEXT:    v_mov_b32_e32 v0, v2
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  %result = atomicrmw fsub float addrspace(5)* %ptr, float %val seq_cst
+  ret float %result
+}
 
-; Private atomics have no real use, but at least shouldn't crash on it.
-define amdgpu_kernel void @atomicrmw_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+define amdgpu_kernel void @alloca_promote_atomicrmw_private_lds_promote(i32 addrspace(1)* %out, i32 %in) nounwind {
+; IR-LABEL: @alloca_promote_atomicrmw_private_lds_promote(
+; IR-NEXT:  entry:
+; IR-NEXT:    [[TMP:%.*]] = alloca [2 x i32], align 4, addrspace(5)
+; IR-NEXT:    [[GEP1:%.*]] = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* [[TMP]], i32 0, i32 0
+; IR-NEXT:    [[GEP2:%.*]] = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* [[TMP]], i32 0, i32 1
+; IR-NEXT:    store i32 0, i32 addrspace(5)* [[GEP1]], align 4
+; IR-NEXT:    store i32 1, i32 addrspace(5)* [[GEP2]], align 4
+; IR-NEXT:    [[GEP3:%.*]] = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* [[TMP]], i32 0, i32 [[IN:%.*]]
+; IR-NEXT:    [[TMP0:%.*]] = load i32, i32 addrspace(5)* [[GEP3]], align 4
+; IR-NEXT:    [[TMP1:%.*]] = add i32 [[TMP0]], 7
+; IR-NEXT:    store i32 [[TMP1]], i32 addrspace(5)* [[GEP3]], align 4
+; IR-NEXT:    store i32 [[TMP0]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; IR-NEXT:    ret void
+;
+; GCN-LABEL: alloca_promote_atomicrmw_private_lds_promote:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %tmp = alloca [2 x i32], addrspace(5)
-  %tmp1 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 0
-  %tmp2 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 1
-  store i32 0, i32 addrspace(5)* %tmp1
-  store i32 1, i32 addrspace(5)* %tmp2
-  %tmp3 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 %in
-  %tmp4 = atomicrmw add i32 addrspace(5)* %tmp3, i32 7 acq_rel
-  store i32 %tmp4, i32 addrspace(1)* %out
+  %gep1 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 0
+  %gep2 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 1
+  store i32 0, i32 addrspace(5)* %gep1
+  store i32 1, i32 addrspace(5)* %gep2
+  %gep3 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 %in
+  %rmw = atomicrmw add i32 addrspace(5)* %gep3, i32 7 acq_rel
+  store i32 %rmw, i32 addrspace(1)* %out
   ret void
 }
 
-define amdgpu_kernel void @cmpxchg_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+define amdgpu_kernel void @alloca_promote_cmpxchg_private(i32 addrspace(1)* %out, i32 %in) nounwind {
+; IR-LABEL: @alloca_promote_cmpxchg_private(
+; IR-NEXT:  entry:
+; IR-NEXT:    [[TMP:%.*]] = alloca [2 x i32], align 4, addrspace(5)
+; IR-NEXT:    [[GEP1:%.*]] = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* [[TMP]], i32 0, i32 0
+; IR-NEXT:    [[GEP2:%.*]] = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* [[TMP]], i32 0, i32 1
+; IR-NEXT:    store i32 0, i32 addrspace(5)* [[GEP1]], align 4
+; IR-NEXT:    store i32 1, i32 addrspace(5)* [[GEP2]], align 4
+; IR-NEXT:    [[GEP3:%.*]] = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* [[TMP]], i32 0, i32 [[IN:%.*]]
+; IR-NEXT:    [[TMP0:%.*]] = load i32, i32 addrspace(5)* [[GEP3]], align 4
+; IR-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; IR-NEXT:    [[TMP2:%.*]] = select i1 [[TMP1]], i32 1, i32 [[TMP0]]
+; IR-NEXT:    store i32 [[TMP2]], i32 addrspace(5)* [[GEP3]], align 4
+; IR-NEXT:    [[TMP3:%.*]] = insertvalue { i32, i1 } undef, i32 [[TMP0]], 0
+; IR-NEXT:    [[TMP4:%.*]] = insertvalue { i32, i1 } [[TMP3]], i1 [[TMP1]], 1
+; IR-NEXT:    [[VAL:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+; IR-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; IR-NEXT:    ret void
+;
+; GCN-LABEL: alloca_promote_cmpxchg_private:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_eq_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %tmp = alloca [2 x i32], addrspace(5)
-  %tmp1 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 0
-  %tmp2 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 1
-  store i32 0, i32 addrspace(5)* %tmp1
-  store i32 1, i32 addrspace(5)* %tmp2
-  %tmp3 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 %in
-  %tmp4 = cmpxchg i32 addrspace(5)* %tmp3, i32 0, i32 1 acq_rel monotonic
-  %val = extractvalue { i32, i1 } %tmp4, 0
+  %gep1 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 0
+  %gep2 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 1
+  store i32 0, i32 addrspace(5)* %gep1
+  store i32 1, i32 addrspace(5)* %gep2
+  %gep3 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(5)* %tmp, i32 0, i32 %in
+  %xchg = cmpxchg i32 addrspace(5)* %gep3, i32 0, i32 1 acq_rel monotonic
+  %val = extractvalue { i32, i1 } %xchg, 0
   store i32 %val, i32 addrspace(1)* %out
   ret void
 }


        

