[llvm] [AMDGPU] Allow merging unordered and monotonic atomic loads in SILoadStoreOptimizer (PR #189932)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 1 03:31:52 PDT 2026
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Harrison Hao (harrisonGPU)
Changes:
Relax barrier handling in SILoadStoreOptimizer to allow merging of
unordered and monotonic non-volatile atomic loads. Atomics with stronger
ordering and volatile accesses remain barriers.
Track the atomic ordering and sync scope in CombineInfo and require them
to match in offsetsCanBeCombined, so that we do not merge operations with
different semantics (e.g. unordered vs. monotonic, or agent vs. workgroup
scope).
This is safe because unordered and monotonic atomics do not introduce
any synchronization beyond per-location coherence.
This transformation is done only on Machine IR. Performing it at the
LLVM IR level would change the number of atomic events and violate the
memory model.
Reference:
https://llvm.org/docs/Atomics.html#monotonic
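As a quick illustration, this is the kind of input the pass can now merge; it mirrors the atomic_two_load_monotonic_merge test added below, and the function name @merge_example is purely illustrative:

```llvm
; Two adjacent, same-scope, non-volatile monotonic atomic loads.
; After instruction selection, SILoadStoreOptimizer can combine the two
; dword loads into a single 64-bit load (global_load_b64 in the gfx1200
; checks below). A volatile access, or any ordering stronger than
; monotonic, still breaks the merge window.
define amdgpu_cs void @merge_example(ptr addrspace(1) %p, ptr addrspace(1) %out) {
  %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
  %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
  %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") monotonic, align 4
  %res = fadd float %a0, %a1
  store float %res, ptr addrspace(1) %out, align 4
  ret void
}
```

Folding the two atomic loads into one wider atomic load directly in the IR would collapse two atomic events into one, which is the memory-model issue mentioned above; merging the already-selected machine instructions avoids that, which is why the change lives in SILoadStoreOptimizer rather than in an IR pass.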
---
Full diff: https://github.com/llvm/llvm-project/pull/189932.diff
2 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp (+31-1)
- (added) llvm/test/CodeGen/AMDGPU/atomic-load-merge.ll (+333)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 2a854312d6125..65938e82d49b9 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -125,6 +125,8 @@ class SILoadStoreOptimizer {
const MachineOperand *AddrReg[MaxAddressRegs];
unsigned NumAddresses;
unsigned Order;
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+ SyncScope::ID SSID = SyncScope::System;
bool hasSameBaseAddress(const CombineInfo &CI) {
if (NumAddresses != CI.NumAddresses)
@@ -902,6 +904,13 @@ void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
CPol = LSO.TII->getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm();
}
+ // Extract atomic ordering and sync scope from MMO for merge compatibility.
+ if (!I->memoperands_empty()) {
+ const MachineMemOperand *MMO = *I->memoperands_begin();
+ Ordering = MMO->getSuccessOrdering();
+ SSID = MMO->getSyncScopeID();
+ }
+
AddressRegs Regs = getRegs(Opc, *LSO.TII);
bool isVIMAGEorVSAMPLE = LSO.TII->isVIMAGE(*I) || LSO.TII->isVSAMPLE(*I);
@@ -1096,6 +1105,10 @@ bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI,
if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))
return false;
+ // Do not merge instructions with different atomic semantics.
+ if (CI.Ordering != Paired.Ordering || CI.SSID != Paired.SSID)
+ return false;
+
if (CI.InstClass == TBUFFER_LOAD || CI.InstClass == TBUFFER_STORE) {
const llvm::AMDGPU::GcnBufferFormatInfo *Info0 =
@@ -2649,7 +2662,7 @@ SILoadStoreOptimizer::collectMergeableInsts(
// Treat volatile accesses, ordered accesses and unmodeled side effects as
// barriers. We can look after this barrier for separate merges.
- if (MI.hasOrderedMemoryRef() || MI.hasUnmodeledSideEffects()) {
+ if (MI.hasUnmodeledSideEffects()) {
LLVM_DEBUG(dbgs() << "Breaking search on barrier: " << MI);
// Search will resume after this instruction in a separate merge list.
@@ -2657,6 +2670,23 @@ SILoadStoreOptimizer::collectMergeableInsts(
break;
}
+ if (MI.hasOrderedMemoryRef()) {
+ // Allow unordered and monotonic nonvolatile atomic loads to be merged.
+ // Instructions without MMOs or with volatile/strongly ordered MMOs
+ // remain barriers.
+ if (MI.mayStore() || MI.memoperands_empty() ||
+ llvm::any_of(MI.memoperands(), [](const MachineMemOperand *MMO) {
+ return MMO->isVolatile() ||
+ isStrongerThanMonotonic(MMO->getSuccessOrdering());
+ })) {
+ LLVM_DEBUG(dbgs() << "Breaking search on barrier: " << MI);
+
+ // Search will resume after this instruction in a separate merge list.
+ ++BlockI;
+ break;
+ }
+ }
+
const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII);
if (InstClass == UNKNOWN)
continue;
diff --git a/llvm/test/CodeGen/AMDGPU/atomic-load-merge.ll b/llvm/test/CodeGen/AMDGPU/atomic-load-merge.ll
new file mode 100644
index 0000000000000..d50a326e71842
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomic-load-merge.ll
@@ -0,0 +1,333 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s
+
+define amdgpu_cs void @atomic_two_load_monotonic_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_monotonic_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b64 v[0:1], v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") monotonic, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_monotonic_merge2(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_monotonic_merge2:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b64 v[0:1], v[0:1], off offset:4 scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %p2 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 8
+ %a0 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p2 syncscope("agent") monotonic, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_unordered_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_unordered_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b64 v[0:1], v[0:1], off
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") unordered, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") unordered, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_unordered_workgroup_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_unordered_workgroup_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b64 v[0:1], v[0:1], off
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("workgroup") unordered, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("workgroup") unordered, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_monotonic_workgroup_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_monotonic_workgroup_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b64 v[0:1], v[0:1], off scope:SCOPE_SE
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("workgroup") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("workgroup") monotonic, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_volatile_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_volatile_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off offset:4 scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v4, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic volatile float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %a1 = load atomic volatile float, ptr addrspace(1) %p1 syncscope("agent") monotonic, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_mixed_atomic_noatomic_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_mixed_atomic_noatomic_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off offset:4
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v4, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %a1 = load float, ptr addrspace(1) %p1, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_gap_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_gap_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off offset:8 scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v4, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 8
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") monotonic, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_acquire_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_acquire_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: global_inv scope:SCOPE_DEV
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off offset:4 scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: global_inv scope:SCOPE_DEV
+; CHECK-NEXT: v_add_f32_e32 v0, v4, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") acquire, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") acquire, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_seq_cst_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_seq_cst_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: global_inv scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off offset:4 scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: global_inv scope:SCOPE_DEV
+; CHECK-NEXT: v_add_f32_e32 v0, v4, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") seq_cst, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") seq_cst, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_same_offset_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_same_offset_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x1
+; CHECK-NEXT: v_add_f32_e32 v1, 1.0, v4
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, 2.0, v0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_add_f32_e32 v0, v1, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %num1 = fadd float %a0, 1.0
+ %num2 = fadd float %a1, 2.0
+ %res = fadd float %num1, %num2
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_mixed_order_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_mixed_order_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off offset:4 scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v4, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") unordered, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") monotonic, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_two_load_mixed_scope_no_merge(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_two_load_mixed_scope_no_merge:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: global_load_b32 v4, v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: global_load_b32 v0, v[0:1], off offset:4 scope:SCOPE_SE
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_add_f32_e32 v0, v4, v0
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("workgroup") monotonic, align 4
+ %res = fadd float %a0, %a1
+ store float %res, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_four_load_monotonic_merge_b128(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_four_load_monotonic_merge_b128:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b128 v[4:7], v[0:1], off scope:SCOPE_DEV
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_dual_add_f32 v0, v4, v5 :: v_dual_add_f32 v1, v6, v7
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %p2 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 8
+ %p3 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 12
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") monotonic, align 4
+ %a2 = load atomic float, ptr addrspace(1) %p2 syncscope("agent") monotonic, align 4
+ %a3 = load atomic float, ptr addrspace(1) %p3 syncscope("agent") monotonic, align 4
+ %s01 = fadd float %a0, %a1
+ %s23 = fadd float %a2, %a3
+ %sum = fadd float %s01, %s23
+ store float %sum, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_four_load_monotonic_workgroup_merge_b128(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_four_load_monotonic_workgroup_merge_b128:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b128 v[4:7], v[0:1], off scope:SCOPE_SE
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_dual_add_f32 v0, v4, v5 :: v_dual_add_f32 v1, v6, v7
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %p2 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 8
+ %p3 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 12
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("workgroup") monotonic, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("workgroup") monotonic, align 4
+ %a2 = load atomic float, ptr addrspace(1) %p2 syncscope("workgroup") monotonic, align 4
+ %a3 = load atomic float, ptr addrspace(1) %p3 syncscope("workgroup") monotonic, align 4
+ %s01 = fadd float %a0, %a1
+ %s23 = fadd float %a2, %a3
+ %sum = fadd float %s01, %s23
+ store float %sum, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_four_load_unordered_merge_b128(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_four_load_unordered_merge_b128:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b128 v[4:7], v[0:1], off
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_dual_add_f32 v0, v4, v5 :: v_dual_add_f32 v1, v6, v7
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %p2 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 8
+ %p3 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 12
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("agent") unordered, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("agent") unordered, align 4
+ %a2 = load atomic float, ptr addrspace(1) %p2 syncscope("agent") unordered, align 4
+ %a3 = load atomic float, ptr addrspace(1) %p3 syncscope("agent") unordered, align 4
+ %s01 = fadd float %a0, %a1
+ %s23 = fadd float %a2, %a3
+ %sum = fadd float %s01, %s23
+ store float %sum, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+define amdgpu_cs void @atomic_four_load_unordered_workgroup_merge_b128(ptr addrspace(1) align 16 %p, ptr addrspace(1) %out) {
+; CHECK-LABEL: atomic_four_load_unordered_workgroup_merge_b128:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: global_load_b128 v[4:7], v[0:1], off
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: v_dual_add_f32 v0, v4, v5 :: v_dual_add_f32 v1, v6, v7
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: global_store_b32 v[2:3], v0, off
+; CHECK-NEXT: s_endpgm
+ %p1 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 4
+ %p2 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 8
+ %p3 = getelementptr inbounds i8, ptr addrspace(1) %p, i64 12
+ %a0 = load atomic float, ptr addrspace(1) %p syncscope("workgroup") unordered, align 4
+ %a1 = load atomic float, ptr addrspace(1) %p1 syncscope("workgroup") unordered, align 4
+ %a2 = load atomic float, ptr addrspace(1) %p2 syncscope("workgroup") unordered, align 4
+ %a3 = load atomic float, ptr addrspace(1) %p3 syncscope("workgroup") unordered, align 4
+ %s01 = fadd float %a0, %a1
+ %s23 = fadd float %a2, %a3
+ %sum = fadd float %s01, %s23
+ store float %sum, ptr addrspace(1) %out, align 4
+ ret void
+}
``````````
https://github.com/llvm/llvm-project/pull/189932
More information about the llvm-commits mailing list