[llvm-branch-commits] [llvm] AMDGPU: Start considering new atomicrmw metadata on integer operations (PR #122138)
Matt Arsenault via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue May 20 09:00:17 PDT 2025
https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/122138
From a77e0a4a6456257446836fa938d73d8b71dd5034 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 16 Apr 2024 13:42:55 +0200
Subject: [PATCH] AMDGPU: Start considering new atomicrmw metadata on integer
operations
Start considering the !amdgpu.no.remote.memory and
!amdgpu.no.fine.grained.memory metadata when deciding whether to expand
integer atomic operations. This does not yet attempt to accurately
handle fadd/fmin/fmax, which are trickier and require migrating the
old "amdgpu-unsafe-fp-atomics" attribute.
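As an illustrative sketch (not itself part of this patch), both
annotations are empty metadata nodes attached directly to the atomicrmw
instruction, as in the updated tests below:

  ; Allocation known to be local to the device: the native atomic
  ; instruction can be kept.
  %r0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1

  ; No fine-grained concurrent host/peer access: also safe to keep.
  %r1 = atomicrmw sub ptr addrspace(1) %ptr, i32 42 seq_cst, align 4, !amdgpu.no.fine.grained.memory !1

  !1 = !{} ; assumed definition; the test diffs only show the use sites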
---
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 65 +-
.../AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll | 1073 +---
.../AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll | 1347 +----
llvm/test/CodeGen/AMDGPU/acc-ldst.ll | 6 +-
.../atomic_optimizations_global_pointer.ll | 2028 +++----
.../CodeGen/AMDGPU/dag-divergence-atomic.ll | 34 +-
llvm/test/CodeGen/AMDGPU/flat_atomics.ll | 172 +-
.../CodeGen/AMDGPU/flat_atomics_i32_system.ll | 1026 +---
llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll | 162 +-
.../AMDGPU/flat_atomics_i64_noprivate.ll | 5102 ++++++++++++-----
.../CodeGen/AMDGPU/flat_atomics_i64_system.ll | 1632 +-----
.../flat_atomics_i64_system_noprivate.ll | 1312 +----
.../CodeGen/AMDGPU/global-saddr-atomics.ll | 162 +-
llvm/test/CodeGen/AMDGPU/global_atomics.ll | 398 +-
.../AMDGPU/global_atomics_i32_system.ll | 1082 +---
.../test/CodeGen/AMDGPU/global_atomics_i64.ll | 162 +-
.../AMDGPU/global_atomics_i64_system.ll | 1330 +----
.../test/CodeGen/AMDGPU/idemponent-atomics.ll | 45 +-
.../CodeGen/AMDGPU/move-to-valu-atomicrmw.ll | 6 +-
.../test/CodeGen/AMDGPU/shl_add_ptr_global.ll | 2 +-
.../AtomicExpand/AMDGPU/expand-atomic-i16.ll | 691 ++-
.../AMDGPU/expand-atomic-i32-agent.ll | 1039 +++-
.../AMDGPU/expand-atomic-i32-system.ll | 360 +-
.../AMDGPU/expand-atomic-i64-agent.ll | 1039 +++-
.../AMDGPU/expand-atomic-i64-system.ll | 360 +-
.../AtomicExpand/AMDGPU/expand-atomic-i8.ll | 215 +-
...expand-atomicrmw-flat-noalias-addrspace.ll | 138 +-
...expand-atomicrmw-integer-ops-0-to-add-0.ll | 65 +-
28 files changed, 9308 insertions(+), 11745 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 6f8bccbcca039..e8bcba7f5ea64 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16939,19 +16939,60 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
case AtomicRMWInst::UDecWrap: {
if (AMDGPU::isFlatGlobalAddrSpace(AS) ||
AS == AMDGPUAS::BUFFER_FAT_POINTER) {
- // Always expand system scope atomics.
- if (HasSystemScope) {
- if (Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Or ||
- Op == AtomicRMWInst::Xor) {
- // Atomic sub/or/xor do not work over PCI express, but atomic add
- // does. InstCombine transforms these with 0 to or, so undo that.
- if (Constant *ConstVal = dyn_cast<Constant>(RMW->getValOperand());
- ConstVal && ConstVal->isNullValue())
- return AtomicExpansionKind::Expand;
- }
-
- return AtomicExpansionKind::CmpXChg;
+ // On most subtargets, for atomicrmw operations other than add/xchg,
+ // whether or not the instructions will behave correctly depends on where
+ // the address physically resides and what interconnect is used in the
+    // system configuration. On some targets the instruction will be a no-op,
+    // and on others synchronization will only occur at degraded device scope.
+ //
+ // If the allocation is known local to the device, the instructions should
+ // work correctly.
+ if (RMW->hasMetadata("amdgpu.no.remote.memory"))
+ return atomicSupportedIfLegalIntType(RMW);
+
+ // If fine-grained remote memory works at device scope, we don't need to
+ // do anything.
+ if (!HasSystemScope &&
+ Subtarget->supportsAgentScopeFineGrainedRemoteMemoryAtomics())
+ return atomicSupportedIfLegalIntType(RMW);
+
+    // If we are targeting a remotely allocated address, the behavior depends
+    // on what kind of allocation the address belongs to.
+    //
+    // If the allocation is fine-grained (in host memory, or in PCIe peer
+    // device memory), the operation may fail, depending on the target.
+    //
+    // Note that fine-grained host memory access does work on APUs or if XGMI
+    // is used, but we cannot tell whether we are targeting an APU, or what
+    // the system configuration is, from the ISA version/target-cpu.
+ if (RMW->hasMetadata("amdgpu.no.fine.grained.memory"))
+ return atomicSupportedIfLegalIntType(RMW);
+
+ if (Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Or ||
+ Op == AtomicRMWInst::Xor) {
+ // Atomic sub/or/xor do not work over PCI express, but atomic add
+ // does. InstCombine transforms these with 0 to or, so undo that.
+ if (Constant *ConstVal = dyn_cast<Constant>(RMW->getValOperand());
+ ConstVal && ConstVal->isNullValue())
+ return AtomicExpansionKind::Expand;
}
+
+ // If the allocation could be in remote, fine-grained memory, the rmw
+ // instructions may fail. cmpxchg should work, so emit that. On some
+ // system configurations, PCIe atomics aren't supported so cmpxchg won't
+ // even work, so you're out of luck anyway.
+
+ // In summary:
+ //
+ // Cases that may fail:
+ // - fine-grained pinned host memory
+ // - fine-grained migratable host memory
+ // - fine-grained PCIe peer device
+ //
+    // Cases that should work, but may be treated overly conservatively:
+ // - fine-grained host memory on an APU
+ // - fine-grained XGMI peer device
+ return AtomicExpansionKind::CmpXChg;
}
return atomicSupportedIfLegalIntType(RMW);
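As an aside, a rough IR-level sketch of the zero-operand special case
above, inferred from the expand-atomicrmw-integer-ops-0-to-add-0.ll test
updated below: atomic add does work over PCIe, so the idempotent
or-with-0 form produced by InstCombine is rewritten rather than turned
into a cmpxchg loop.

  ; Input at system scope, no metadata (InstCombine's canonical form):
  %old = atomicrmw or ptr addrspace(1) %p, i32 0 seq_cst, align 4
  ; Approximate result of the Expand decision:
  %old = atomicrmw add ptr addrspace(1) %p, i32 0 seq_cst, align 4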
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
index 70968e23e4104..195ca4f2f2083 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_udec_wrap.ll
@@ -91,7 +91,7 @@ define amdgpu_kernel void @lds_atomic_dec_ret_i32(ptr addrspace(1) %out, ptr add
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -175,7 +175,7 @@ define amdgpu_kernel void @lds_atomic_dec_ret_i32_offset(ptr addrspace(1) %out,
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(3) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(3) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(3) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -233,7 +233,7 @@ define amdgpu_kernel void @lds_atomic_dec_noret_i32(ptr addrspace(3) %ptr) #1 {
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -291,7 +291,7 @@ define amdgpu_kernel void @lds_atomic_dec_noret_i32_offset(ptr addrspace(3) %ptr
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(3) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(3) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(3) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -368,7 +368,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32(ptr addrspace(1) %out, ptr
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -451,7 +451,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset(ptr addrspace(1) %ou
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -462,36 +462,19 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_system(ptr addrspace
; CI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CI-NEXT: v_not_b32_e32 v2, 41
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dword s6, s[2:3], 0x4
-; CI-NEXT: s_add_u32 s4, s2, 16
-; CI-NEXT: s_addc_u32 s5, s3, 0
-; CI-NEXT: v_mov_b32_e32 v0, s4
-; CI-NEXT: s_mov_b64 s[2:3], 0
-; CI-NEXT: v_mov_b32_e32 v1, s5
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v3, s6
-; CI-NEXT: .LBB6_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_mov_b32_e32 v4, v3
-; CI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; CI-NEXT: v_add_i32_e32 v5, vcc, 0xffffffd5, v4
-; CI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
-; CI-NEXT: v_cndmask_b32_e64 v3, v3, 42, vcc
-; CI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; CI-NEXT: s_add_u32 s2, s2, 16
+; CI-NEXT: s_addc_u32 s3, s3, 0
+; CI-NEXT: v_mov_b32_e32 v0, s2
+; CI-NEXT: v_mov_b32_e32 v1, s3
+; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; CI-NEXT: s_cbranch_execnz .LBB6_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
-; CI-NEXT: s_or_b64 exec, exec, s[2:3]
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_store_dword v[0:1], v3
+; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i32_offset_system:
@@ -499,131 +482,59 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_system(ptr addrspace
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; VI-NEXT: v_not_b32_e32 v2, 41
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dword s6, s[2:3], 0x10
-; VI-NEXT: s_add_u32 s4, s2, 16
-; VI-NEXT: s_addc_u32 s5, s3, 0
-; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_mov_b64 s[2:3], 0
-; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v3, s6
-; VI-NEXT: .LBB6_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0xffffffd5, v4
-; VI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
-; VI-NEXT: v_cndmask_b32_e64 v3, v3, 42, vcc
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: s_add_u32 s2, s2, 16
+; VI-NEXT: s_addc_u32 s3, s3, 0
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; VI-NEXT: s_cbranch_execnz .LBB6_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_ret_i32_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_not_b32_e32 v0, 41
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dword s6, s[2:3], 0x10
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s6
-; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-NEXT: v_add_u32_e32 v4, 0xffffffd5, v3
-; GFX9-NEXT: v_add_u32_e32 v2, -1, v3
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v0
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 42, vcc
-; GFX9-NEXT: global_atomic_cmpswap v2, v1, v[2:3], s[2:3] offset:16 glc
+; GFX9-NEXT: global_atomic_dec v0, v1, v0, s[2:3] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB6_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: global_store_dword v0, v2, s[0:1]
+; GFX9-NEXT: global_store_dword v1, v0, s[0:1]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_dec_ret_i32_offset_system:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dword s4, s[2:3], 0x10
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_mov_b32_e32 v2, v1
-; GFX10-NEXT: v_add_nc_u32_e32 v1, 0xffffffd5, v2
-; GFX10-NEXT: v_add_nc_u32_e32 v3, -1, v2
-; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v1
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 42, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[2:3] offset:16 glc
+; GFX10-NEXT: global_atomic_dec v0, v1, v0, s[2:3] offset:16 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: s_cbranch_execnz .LBB6_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_ret_i32_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b32 s4, s[2:3], 0x10
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xffffffd5, v2
-; GFX11-NEXT: v_add_nc_u32_e32 v3, -1, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v1
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v3, 42, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[2:3] offset:16 glc
+; GFX11-NEXT: global_atomic_dec_u32 v0, v1, v0, s[2:3] offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_execnz .LBB6_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -692,7 +603,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32(ptr addrspace(1) %ptr) #1
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -765,7 +676,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset(ptr addrspace(1) %
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -775,32 +686,16 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_system(ptr addrspa
; CI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CI-NEXT: v_not_b32_e32 v4, 41
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dword s4, s[0:1], 0x4
-; CI-NEXT: s_add_u32 s2, s0, 16
-; CI-NEXT: s_addc_u32 s3, s1, 0
-; CI-NEXT: v_mov_b32_e32 v0, s2
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v3, s4
-; CI-NEXT: .LBB9_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_add_i32_e32 v2, vcc, -1, v3
-; CI-NEXT: v_add_i32_e32 v5, vcc, 0xffffffd5, v3
-; CI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v4
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 42, vcc
-; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: s_add_u32 s0, s0, 16
+; CI-NEXT: s_addc_u32 s1, s1, 0
+; CI-NEXT: v_mov_b32_e32 v0, s0
+; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: flat_atomic_dec v[0:1], v2
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v3, v2
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB9_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i32_offset_system:
@@ -808,118 +703,53 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_system(ptr addrspa
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; VI-NEXT: v_not_b32_e32 v4, 41
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dword s4, s[0:1], 0x10
-; VI-NEXT: s_add_u32 s2, s0, 16
-; VI-NEXT: s_addc_u32 s3, s1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB9_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0xffffffd5, v3
-; VI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v4
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 42, vcc
-; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_atomic_dec v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v3, v2
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB9_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_noret_i32_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
-; GFX9-NEXT: v_not_b32_e32 v2, 41
-; GFX9-NEXT: v_mov_b32_e32 v3, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_add_u32_e32 v4, 0xffffffd5, v1
-; GFX9-NEXT: v_add_u32_e32 v0, -1, v1
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 42, vcc
-; GFX9-NEXT: global_atomic_cmpswap v0, v3, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT: global_atomic_dec v1, v0, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_execnz .LBB9_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_dec_noret_i32_offset_system:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX10-NEXT: v_mov_b32_e32 v2, 0
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dword s2, s[0:1], 0x10
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v1, s2
-; GFX10-NEXT: s_mov_b32 s2, 0
-; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_add_nc_u32_e32 v0, 0xffffffd5, v1
-; GFX10-NEXT: v_add_nc_u32_e32 v3, -1, v1
-; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v0
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v3, 42, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: global_atomic_dec v1, v0, s[0:1] offset:16
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v0
-; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: s_cbranch_execnz .LBB9_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i32_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x10
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s2
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 0xffffffd5, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v3, -1, v1
-; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v3, 42, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_atomic_dec_u32 v1, v0, s[0:1] offset:16
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v0
-; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_execnz .LBB9_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -1016,7 +846,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i32_offset_addr64(ptr addrspace
%gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
%out.gep = getelementptr i32, ptr addrspace(1) %out, i32 %id
%gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out.gep, align 4
ret void
}
@@ -1100,7 +930,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i32_offset_addr64(ptr addrspa
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
%gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -1191,7 +1021,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out, align 4
ret void
}
@@ -1290,7 +1120,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out, align 4
ret void
}
@@ -1302,34 +1132,18 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CI-NEXT: v_not_b32_e32 v2, 41
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 16
; CI-NEXT: s_addc_u32 s3, s3, 0
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: flat_load_dword v3, v[0:1]
-; CI-NEXT: s_mov_b64 s[2:3], 0
-; CI-NEXT: .LBB14_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v4, v3
-; CI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; CI-NEXT: v_add_i32_e32 v5, vcc, 0xffffffd5, v4
-; CI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
-; CI-NEXT: v_cndmask_b32_e64 v3, v3, 42, vcc
-; CI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; CI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; CI-NEXT: s_cbranch_execnz .LBB14_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
-; CI-NEXT: s_or_b64 exec, exec, s[2:3]
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_store_dword v[0:1], v3
+; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_ret_i32_offset_system:
@@ -1338,34 +1152,18 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; VI-NEXT: v_not_b32_e32 v2, 41
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: flat_load_dword v3, v[0:1]
-; VI-NEXT: s_mov_b64 s[2:3], 0
-; VI-NEXT: .LBB14_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0xffffffd5, v4
-; VI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
-; VI-NEXT: v_cndmask_b32_e64 v3, v3, 42, vcc
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; VI-NEXT: s_cbranch_execnz .LBB14_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_dec_ret_i32_offset_system:
@@ -1373,32 +1171,16 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
-; GFX9-NEXT: v_not_b32_e32 v2, 41
+; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
-; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_add_u32_e32 v5, 0xffffffd5, v4
-; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, 42, vcc
-; GFX9-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GFX9-NEXT: flat_atomic_dec v2, v[0:1], v2 offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_execnz .LBB14_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: flat_store_dword v[0:1], v3
+; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: flat_atomic_dec_ret_i32_offset_system:
@@ -1408,31 +1190,16 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v2, 42
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s2, s2, 16
; GFX10-NEXT: s_addc_u32 s3, s3, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
-; GFX10-NEXT: s_mov_b32 s2, 0
-; GFX10-NEXT: flat_load_dword v2, v[0:1]
-; GFX10-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v3, v2
-; GFX10-NEXT: v_add_nc_u32_e32 v2, 0xffffffd5, v3
-; GFX10-NEXT: v_add_nc_u32_e32 v4, -1, v3
-; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v2
-; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, 42, vcc_lo
-; GFX10-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-NEXT: flat_atomic_dec v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: s_cbranch_execnz .LBB14_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -1441,36 +1208,18 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_system(ptr %out, ptr %
; GFX11-LABEL: flat_atomic_dec_ret_i32_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v2, 42
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: flat_load_b32 v2, v[0:1] offset:16
-; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0xffffffd5, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, -1, v3
-; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v4, 42, vcc_lo
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-NEXT: flat_atomic_dec_u32 v2, v[0:1], v2 offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_execnz .LBB14_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out, align 4
ret void
}
@@ -1550,7 +1299,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -1636,7 +1385,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -1647,30 +1396,15 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_system(ptr %ptr) #1
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CI-NEXT: v_not_b32_e32 v4, 41
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 16
; CI-NEXT: s_addc_u32 s1, s1, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_load_dword v3, v[0:1]
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: .LBB17_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: v_add_i32_e32 v2, vcc, -1, v3
-; CI-NEXT: v_add_i32_e32 v5, vcc, 0xffffffd5, v3
-; CI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v4
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 42, vcc
-; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: flat_atomic_dec v[0:1], v2
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v3, v2
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB17_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i32_offset_system:
@@ -1679,30 +1413,15 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_system(ptr %ptr) #1
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; VI-NEXT: v_not_b32_e32 v4, 41
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_load_dword v3, v[0:1]
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: .LBB17_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0xffffffd5, v3
-; VI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v4
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 42, vcc
-; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; VI-NEXT: flat_atomic_dec v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v3, v2
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB17_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_dec_noret_i32_offset_system:
@@ -1710,28 +1429,13 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_system(ptr %ptr) #1
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
-; GFX9-NEXT: v_not_b32_e32 v4, 41
+; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GFX9-NEXT: s_mov_b64 s[0:1], 0
-; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v5, 0xffffffd5, v3
-; GFX9-NEXT: v_add_u32_e32 v2, -1, v3
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v5, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 42, vcc
-; GFX9-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] offset:16 glc
+; GFX9-NEXT: flat_atomic_dec v[0:1], v2 offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT: s_cbranch_execnz .LBB17_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: flat_atomic_dec_noret_i32_offset_system:
@@ -1741,61 +1445,33 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_system(ptr %ptr) #1
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v2, 42
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 16
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: s_mov_b32 s0, 0
-; GFX10-NEXT: flat_load_dword v3, v[0:1]
-; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v2, 0xffffffd5, v3
-; GFX10-NEXT: v_add_nc_u32_e32 v4, -1, v3
-; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v2
-; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, 42, vcc_lo
-; GFX10-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_atomic_dec v[0:1], v2
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v2
-; GFX10-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execnz .LBB17_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i32_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v2, 42
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:16
-; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 0xffffffd5, v3
-; GFX11-NEXT: v_add_nc_u32_e32 v4, -1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0xffffffd6, v2
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v4, 42, vcc_lo
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_atomic_dec_u32 v[0:1], v2 offset:16
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB17_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -1924,7 +1600,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i32_offset_addr64(ptr %out, ptr %
%gep.tid = getelementptr i32, ptr %ptr, i32 %id
%out.gep = getelementptr i32, ptr %out, i32 %id
%gep = getelementptr i32, ptr %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out.gep, align 4
ret void
}
@@ -2031,7 +1707,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i32_offset_addr64(ptr %ptr) #1
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr %ptr, i32 %id
%gep = getelementptr i32, ptr %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -2137,7 +1813,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i64 %result, ptr %out, align 4
ret void
}
@@ -2251,7 +1927,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i64 %result, ptr %out, align 4
ret void
}
@@ -2336,7 +2012,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
@@ -2427,7 +2103,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
@@ -2437,41 +2113,17 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_system(ptr %ptr) #1
; CI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CI-NEXT: v_not_b32_e32 v6, 41
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_add_u32 s2, s0, 32
-; CI-NEXT: s_addc_u32 s3, s1, 0
-; CI-NEXT: v_mov_b32_e32 v5, s3
-; CI-NEXT: s_add_u32 s0, s0, 36
-; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: s_add_u32 s0, s0, 32
; CI-NEXT: s_addc_u32 s1, s1, 0
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_load_dword v2, v[4:5]
-; CI-NEXT: flat_load_dword v3, v[0:1]
-; CI-NEXT: v_mov_b32_e32 v7, -1
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: .LBB24_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: v_add_i32_e32 v8, vcc, -1, v2
-; CI-NEXT: v_addc_u32_e32 v9, vcc, -1, v3, vcc
-; CI-NEXT: v_add_i32_e32 v0, vcc, 0xffffffd5, v2
-; CI-NEXT: v_addc_u32_e32 v1, vcc, -1, v3, vcc
-; CI-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[6:7]
-; CI-NEXT: v_cndmask_b32_e64 v0, v8, 42, vcc
-; CI-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: v_mov_b32_e32 v2, s0
+; CI-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; CI-NEXT: v_mov_b32_e32 v3, v1
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v2, v0
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB24_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_dec_noret_i64_offset_system:
@@ -2479,75 +2131,32 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_system(ptr %ptr) #1
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; VI-NEXT: v_not_b32_e32 v6, 41
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_add_u32 s2, s0, 32
-; VI-NEXT: s_addc_u32 s3, s1, 0
-; VI-NEXT: v_mov_b32_e32 v5, s3
-; VI-NEXT: s_add_u32 s0, s0, 36
-; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_load_dword v2, v[4:5]
-; VI-NEXT: flat_load_dword v3, v[0:1]
-; VI-NEXT: v_mov_b32_e32 v7, -1
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: .LBB24_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v8, vcc, -1, v2
-; VI-NEXT: v_addc_u32_e32 v9, vcc, -1, v3, vcc
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0xffffffd5, v2
-; VI-NEXT: v_addc_u32_e32 v1, vcc, -1, v3, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, v8, 42, vcc
-; VI-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, v1
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v2, v0
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB24_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_dec_noret_i64_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s12, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
-; GFX9-NEXT: v_not_b32_e32 v6, 41
-; GFX9-NEXT: v_mov_b32_e32 v7, -1
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v5, s1
-; GFX9-NEXT: v_mov_b32_e32 v4, s0
-; GFX9-NEXT: flat_load_dwordx2 v[2:3], v[4:5] offset:32
-; GFX9-NEXT: s_mov_b64 s[0:1], 0
-; GFX9-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, -1, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, -1, v3, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffffd5, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v8, 42, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, v0
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT: s_cbranch_execnz .LBB24_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: flat_atomic_dec_noret_i64_offset_system:
@@ -2557,75 +2166,35 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_system(ptr %ptr) #1
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX10-NEXT: v_not_b32_e32 v6, 41
-; GFX10-NEXT: v_mov_b32_e32 v7, -1
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 32
; GFX10-NEXT: s_addc_u32 s1, s1, 0
-; GFX10-NEXT: v_mov_b32_e32 v5, s1
-; GFX10-NEXT: v_mov_b32_e32 v4, s0
-; GFX10-NEXT: s_mov_b32 s0, 0
-; GFX10-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
-; GFX10-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0xffffffd5, v2
-; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, -1, v3, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v2, -1
-; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, -1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 42, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc_lo
-; GFX10-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v3, s1
+; GFX10-NEXT: v_mov_b32_e32 v2, s0
+; GFX10-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execnz .LBB24_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_dec_noret_i64_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX11-NEXT: v_not_b32_e32 v6, 41
-; GFX11-NEXT: v_mov_b32_e32 v7, -1
+; GFX11-NEXT: v_mov_b32_e32 v0, 42
+; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b64 v[2:3], v[4:5] offset:32
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0xffffffd5, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v8, vcc_lo, v2, -1
-; GFX11-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v3, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v8, 42, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc_lo
-; GFX11-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:5], v[0:3] offset:32 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: flat_atomic_dec_u64 v[2:3], v[0:1] offset:32
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB24_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
@@ -2765,7 +2334,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(ptr %out, ptr %
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%out.gep = getelementptr i64, ptr %out, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i64 %result, ptr %out.gep, align 4
ret void
}
@@ -2876,7 +2445,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(ptr %ptr) #1
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
@@ -2971,7 +2540,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0(ptr addrspace(1) %out, ptr
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #2
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds0, i32 0, i32 %idx.0
- %result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i32 9 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i32 9 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %idx.0, ptr addrspace(1) %add_use, align 4
store i32 %result, ptr addrspace(1) %out, align 4
ret void
@@ -3060,7 +2629,7 @@ define amdgpu_kernel void @lds_atomic_dec_ret_i64(ptr addrspace(1) %out, ptr add
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -3212,7 +2781,7 @@ define amdgpu_kernel void @lds_atomic_dec_noret_i64(ptr addrspace(3) %ptr) #1 {
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(3) %ptr, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -3275,7 +2844,7 @@ define amdgpu_kernel void @lds_atomic_dec_noret_i64_offset(ptr addrspace(3) %ptr
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(3) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(3) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(3) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -3357,7 +2926,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64(ptr addrspace(1) %out, ptr
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -3445,7 +3014,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset(ptr addrspace(1) %ou
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -3456,42 +3025,20 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_system(ptr addrspace
; CI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CI-NEXT: v_not_b32_e32 v0, 41
-; CI-NEXT: v_mov_b32_e32 v1, -1
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x8
-; CI-NEXT: s_add_u32 s4, s2, 32
-; CI-NEXT: s_addc_u32 s5, s3, 0
-; CI-NEXT: v_mov_b32_e32 v2, s4
-; CI-NEXT: s_mov_b64 s[2:3], 0
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v4, s6
-; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: v_mov_b32_e32 v5, s7
+; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CI-NEXT: .LBB34_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_mov_b32_e32 v7, v5
-; CI-NEXT: v_mov_b32_e32 v6, v4
-; CI-NEXT: v_add_i32_e32 v8, vcc, -1, v6
-; CI-NEXT: v_addc_u32_e32 v9, vcc, -1, v7, vcc
-; CI-NEXT: v_add_i32_e32 v4, vcc, 0xffffffd5, v6
-; CI-NEXT: v_addc_u32_e32 v5, vcc, -1, v7, vcc
-; CI-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; CI-NEXT: v_cndmask_b32_e64 v4, v8, 42, vcc
-; CI-NEXT: v_cndmask_b32_e64 v5, v9, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[2:3], v[4:7] glc
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_add_u32 s2, s2, 32
+; CI-NEXT: s_addc_u32 s3, s3, 0
+; CI-NEXT: v_mov_b32_e32 v2, s2
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: v_mov_b32_e32 v3, s3
+; CI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; CI-NEXT: s_cbranch_execnz .LBB34_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
-; CI-NEXT: s_or_b64 exec, exec, s[2:3]
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v2, s0
+; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_ret_i64_offset_system:
@@ -3499,158 +3046,63 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_system(ptr addrspace
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; VI-NEXT: v_not_b32_e32 v0, 41
-; VI-NEXT: v_mov_b32_e32 v1, -1
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x20
-; VI-NEXT: s_add_u32 s4, s2, 32
-; VI-NEXT: s_addc_u32 s5, s3, 0
-; VI-NEXT: v_mov_b32_e32 v2, s4
-; VI-NEXT: s_mov_b64 s[2:3], 0
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v4, s6
-; VI-NEXT: v_mov_b32_e32 v3, s5
-; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; VI-NEXT: .LBB34_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: v_add_u32_e32 v8, vcc, -1, v6
-; VI-NEXT: v_addc_u32_e32 v9, vcc, -1, v7, vcc
-; VI-NEXT: v_add_u32_e32 v4, vcc, 0xffffffd5, v6
-; VI-NEXT: v_addc_u32_e32 v5, vcc, -1, v7, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; VI-NEXT: v_cndmask_b32_e64 v4, v8, 42, vcc
-; VI-NEXT: v_cndmask_b32_e64 v5, v9, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[2:3], v[4:7] glc
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_u32 s2, s2, 32
+; VI-NEXT: s_addc_u32 s3, s3, 0
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v3, s3
+; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; VI-NEXT: s_cbranch_execnz .LBB34_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[2:3]
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_ret_i64_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX9-NEXT: v_not_b32_e32 v0, 41
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_mov_b32_e32 v1, -1
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x20
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s6
-; GFX9-NEXT: v_mov_b32_e32 v3, s7
-; GFX9-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_mov_b32_e32 v8, v3
-; GFX9-NEXT: v_mov_b32_e32 v7, v2
-; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, -1, v7
-; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, -1, v8, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 0xffffffd5, v7
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v8, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, 42, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[5:8], s[2:3] offset:32 glc
+; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[7:8]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB34_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: global_store_dwordx2 v0, v[2:3], s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_dec_ret_i64_offset_system:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX10-NEXT: v_not_b32_e32 v0, 41
-; GFX10-NEXT: v_mov_b32_e32 v1, -1
-; GFX10-NEXT: v_mov_b32_e32 v4, 0
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x20
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-NEXT: v_mov_b32_e32 v3, s5
-; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_mov_b32_e32 v8, v3
-; GFX10-NEXT: v_mov_b32_e32 v7, v2
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffffd5, v7
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, -1, v8, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v5, vcc_lo, v7, -1
-; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, -1, v8, vcc_lo
-; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 42, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[5:8], s[2:3] offset:32 glc
+; GFX10-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[7:8]
-; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: s_cbranch_execnz .LBB34_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: global_store_dwordx2 v0, v[2:3], s[0:1]
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_ret_i64_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-NEXT: v_not_b32_e32 v0, 41
-; GFX11-NEXT: v_dual_mov_b32 v1, -1 :: v_dual_mov_b32 v4, 0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b64 s[4:5], s[2:3], 0x20
+; GFX11-NEXT: v_mov_b32_e32 v0, 42
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB34_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffffd5, v7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v8, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v5, vcc_lo, v7, -1
-; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, -1, v8, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v5, v5, 42, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b64 v[2:3], v4, v[5:8], s[2:3] offset:32 glc
+; GFX11-NEXT: global_atomic_dec_u64 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[7:8]
-; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_execnz .LBB34_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: global_store_b64 v0, v[2:3], s[0:1]
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -3724,7 +3176,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64(ptr addrspace(1) %ptr) #1
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -3802,7 +3254,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset(ptr addrspace(1) %
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -3812,38 +3264,17 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_system(ptr addrspa
; CI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CI-NEXT: v_not_b32_e32 v4, 41
-; CI-NEXT: v_mov_b32_e32 v5, -1
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x8
-; CI-NEXT: s_add_u32 s2, s0, 32
-; CI-NEXT: s_addc_u32 s3, s1, 0
-; CI-NEXT: v_mov_b32_e32 v7, s3
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v2, s4
-; CI-NEXT: v_mov_b32_e32 v6, s2
-; CI-NEXT: v_mov_b32_e32 v3, s5
+; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CI-NEXT: .LBB37_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_add_i32_e32 v8, vcc, -1, v2
-; CI-NEXT: v_addc_u32_e32 v9, vcc, -1, v3, vcc
-; CI-NEXT: v_add_i32_e32 v0, vcc, 0xffffffd5, v2
-; CI-NEXT: v_addc_u32_e32 v1, vcc, -1, v3, vcc
-; CI-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
-; CI-NEXT: v_cndmask_b32_e64 v0, v8, 42, vcc
-; CI-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_add_u32 s0, s0, 32
+; CI-NEXT: s_addc_u32 s1, s1, 0
+; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: v_mov_b32_e32 v2, s0
+; CI-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; CI-NEXT: v_mov_b32_e32 v3, v1
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v2, v0
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB37_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_dec_noret_i64_offset_system:
@@ -3851,144 +3282,57 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_system(ptr addrspa
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; VI-NEXT: v_not_b32_e32 v4, 41
-; VI-NEXT: v_mov_b32_e32 v5, -1
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
-; VI-NEXT: s_add_u32 s2, s0, 32
-; VI-NEXT: s_addc_u32 s3, s1, 0
-; VI-NEXT: v_mov_b32_e32 v7, s3
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s4
-; VI-NEXT: v_mov_b32_e32 v6, s2
-; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; VI-NEXT: .LBB37_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_add_u32_e32 v8, vcc, -1, v2
-; VI-NEXT: v_addc_u32_e32 v9, vcc, -1, v3, vcc
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0xffffffd5, v2
-; VI-NEXT: v_addc_u32_e32 v1, vcc, -1, v3, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
-; VI-NEXT: v_cndmask_b32_e64 v0, v8, 42, vcc
-; VI-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_u32 s0, s0, 32
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, v1
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v2, v0
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB37_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_dec_noret_i64_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX9-NEXT: v_not_b32_e32 v4, 41
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
-; GFX9-NEXT: v_mov_b32_e32 v5, -1
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-NEXT: v_mov_b32_e32 v3, s5
-; GFX9-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, -1, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, -1, v3, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffffd5, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v7, 42, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v8, 0, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: global_atomic_dec_x2 v2, v[0:1], s[0:1] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v2, v0
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_execnz .LBB37_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_dec_noret_i64_offset_system:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX10-NEXT: v_not_b32_e32 v4, 41
-; GFX10-NEXT: v_mov_b32_e32 v5, -1
-; GFX10-NEXT: v_mov_b32_e32 v6, 0
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x20
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, s2
-; GFX10-NEXT: v_mov_b32_e32 v3, s3
-; GFX10-NEXT: s_mov_b32 s2, 0
-; GFX10-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0xffffffd5, v2
-; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, -1, v3, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v2, -1
-; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, -1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v7, 42, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v8, 0, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:32 glc
-; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: global_atomic_dec_x2 v2, v[0:1], s[0:1] offset:32
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: s_cbranch_execnz .LBB37_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_dec_noret_i64_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX11-NEXT: v_not_b32_e32 v4, 41
-; GFX11-NEXT: v_dual_mov_b32 v5, -1 :: v_dual_mov_b32 v6, 0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x20
+; GFX11-NEXT: v_mov_b32_e32 v0, 42
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB37_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0xffffffd5, v2
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v3, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v7, vcc_lo, v2, -1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, -1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v7, 42, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v8, 0, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] offset:32 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_atomic_dec_u64 v2, v[0:1], s[0:1] offset:32
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_execnz .LBB37_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -4090,7 +3434,7 @@ define amdgpu_kernel void @global_atomic_dec_ret_i64_offset_addr64(ptr addrspace
%gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id
%out.gep = getelementptr i64, ptr addrspace(1) %out, i32 %id
%gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out.gep, align 4
ret void
}
@@ -4179,7 +3523,7 @@ define amdgpu_kernel void @global_atomic_dec_noret_i64_offset_addr64(ptr addrspa
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id
%gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -4280,7 +3624,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(ptr addrspace(1) %out,
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #2
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i64], ptr addrspace(3) @lds1, i32 0, i32 %idx.0
- %result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i64 9 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i64 9 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i32 %idx.0, ptr addrspace(1) %add_use, align 4
store i64 %result, ptr addrspace(1) %out, align 4
ret void
@@ -4291,6 +3635,7 @@ attributes #1 = { nounwind }
attributes #2 = { nounwind memory(none) }
!0 = !{i32 5, i32 6}
+!1 = !{}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll
index 3aa6defdd79fc..d5390fc75df57 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_uinc_wrap.ll
@@ -424,7 +424,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32(ptr addrspace(1) %out, ptr
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -518,7 +518,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset(ptr addrspace(1) %ou
; GFX12-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -530,30 +530,15 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_sistem(ptr addrspace
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dword s6, s[2:3], 0x4
-; CI-NEXT: s_add_u32 s4, s2, 16
-; CI-NEXT: s_addc_u32 s5, s3, 0
-; CI-NEXT: v_mov_b32_e32 v0, s4
-; CI-NEXT: s_mov_b64 s[2:3], 0
-; CI-NEXT: v_mov_b32_e32 v1, s5
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v2, s6
-; CI-NEXT: .LBB6_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_mov_b32_e32 v3, v2
-; CI-NEXT: v_add_i32_e32 v2, vcc, 1, v3
-; CI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: s_add_u32 s2, s2, 16
+; CI-NEXT: s_addc_u32 s3, s3, 0
+; CI-NEXT: v_mov_b32_e32 v0, s2
+; CI-NEXT: v_mov_b32_e32 v1, s3
+; CI-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; CI-NEXT: s_cbranch_execnz .LBB6_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
-; CI-NEXT: s_or_b64 exec, exec, s[2:3]
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_store_dword v[0:1], v2
@@ -565,30 +550,15 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_sistem(ptr addrspace
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dword s6, s[2:3], 0x10
-; VI-NEXT: s_add_u32 s4, s2, 16
-; VI-NEXT: s_addc_u32 s5, s3, 0
-; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_mov_b64 s[2:3], 0
-; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: .LBB6_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_mov_b32_e32 v3, v2
-; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v3
-; VI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; VI-NEXT: s_add_u32 s2, s2, 16
+; VI-NEXT: s_addc_u32 s3, s3, 0
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; VI-NEXT: s_cbranch_execnz .LBB6_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
@@ -597,126 +567,54 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_sistem(ptr addrspace
; GFX9-LABEL: global_atomic_inc_ret_i32_offset_sistem:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dword s6, s[2:3], 0x10
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-NEXT: v_add_u32_e32 v1, 1, v2
-; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 42, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX9-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[2:3] offset:16 glc
+; GFX9-NEXT: global_atomic_inc v0, v1, v0, s[2:3] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB6_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT: global_store_dword v1, v0, s[0:1]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_inc_ret_i32_offset_sistem:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dword s4, s[2:3], 0x10
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_mov_b32_e32 v2, v1
-; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v2
-; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v2
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[2:3] offset:16 glc
+; GFX10-NEXT: global_atomic_inc v0, v1, v0, s[2:3] offset:16 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: s_cbranch_execnz .LBB6_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_ret_i32_offset_sistem:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b32 s4, s[2:3], 0x10
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v2, v1
-; GFX11-NEXT: v_add_nc_u32_e32 v1, 1, v2
-; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[2:3] offset:16 glc
+; GFX11-NEXT: global_atomic_inc_u32 v0, v1, v0, s[2:3] offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_execnz .LBB6_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: global_atomic_inc_ret_i32_offset_sistem:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_load_b32 s4, s[2:3], 0x10
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
-; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_mov_b32_e32 v2, v1
-; GFX12-NEXT: v_add_nc_u32_e32 v1, 1, v2
-; GFX12-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v2
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX12-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[2:3] offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_atomic_inc_u32 v0, v1, v0, s[2:3] offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; GFX12-NEXT: s_cbranch_execnz .LBB6_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX12-NEXT: v_mov_b32_e32 v0, 0
-; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -795,7 +693,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32(ptr addrspace(1) %ptr) #1
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -878,7 +776,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset(ptr addrspace(1) %
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -889,29 +787,15 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_system(ptr addrspa
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dword s4, s[0:1], 0x4
-; CI-NEXT: s_add_u32 s2, s0, 16
-; CI-NEXT: s_addc_u32 s3, s1, 0
-; CI-NEXT: v_mov_b32_e32 v0, s2
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v3, s4
-; CI-NEXT: .LBB9_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_add_i32_e32 v2, vcc, 1, v3
-; CI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: s_add_u32 s0, s0, 16
+; CI-NEXT: s_addc_u32 s1, s1, 0
+; CI-NEXT: v_mov_b32_e32 v0, s0
+; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: flat_atomic_inc v[0:1], v2
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v3, v2
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB9_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_inc_noret_i32_offset_system:
@@ -920,140 +804,64 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_system(ptr addrspa
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dword s4, s[0:1], 0x10
-; VI-NEXT: s_add_u32 s2, s0, 16
-; VI-NEXT: s_addc_u32 s3, s1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v3, s4
-; VI-NEXT: .LBB9_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v3
-; VI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_atomic_inc v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v3, v2
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB9_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_inc_noret_i32_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_add_u32_e32 v0, 1, v1
-; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 42, v1
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT: global_atomic_inc v1, v0, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_execnz .LBB9_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_inc_noret_i32_offset_system:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX10-NEXT: v_mov_b32_e32 v2, 0
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dword s2, s[0:1], 0x10
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v1, s2
-; GFX10-NEXT: s_mov_b32 s2, 0
-; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_add_nc_u32_e32 v0, 1, v1
-; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v1
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: global_atomic_inc v1, v0, s[0:1] offset:16
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v0
-; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: s_cbranch_execnz .LBB9_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i32_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x10
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s2
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v0, 1, v1
-; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v1
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_atomic_inc_u32 v1, v0, s[0:1] offset:16
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v0
-; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_execnz .LBB9_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: global_atomic_inc_noret_i32_offset_system:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_load_b32 s2, s[0:1], 0x10
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s2
-; GFX12-NEXT: s_mov_b32 s2, 0
-; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_add_nc_u32_e32 v0, 1, v1
-; GFX12-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v1
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
+; GFX12-NEXT: v_dual_mov_b32 v0, 42 :: v_dual_mov_b32 v1, 0
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b32 v0, v2, v[0:1], s[0:1] offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_atomic_inc_u32 v1, v0, s[0:1] offset:16 scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -1163,7 +971,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(ptr addrspace
%gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
%out.gep = getelementptr i32, ptr addrspace(1) %out, i32 %id
%gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr addrspace(1) %out.gep, align 4
ret void
}
@@ -1259,7 +1067,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(ptr addrspa
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr addrspace(1) %ptr, i32 %id
%gep = getelementptr i32, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -1817,7 +1625,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -1917,7 +1725,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -1928,38 +1736,20 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_system(ptr addrspace
; CI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x8
-; CI-NEXT: s_add_u32 s6, s2, 32
-; CI-NEXT: s_addc_u32 s7, s3, 0
-; CI-NEXT: v_mov_b32_e32 v0, s6
-; CI-NEXT: s_mov_b64 s[2:3], 0
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v2, s4
-; CI-NEXT: v_mov_b32_e32 v1, s7
-; CI-NEXT: v_mov_b32_e32 v3, s5
-; CI-NEXT: .LBB19_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_mov_b32_e32 v5, v3
-; CI-NEXT: v_mov_b32_e32 v4, v2
-; CI-NEXT: v_add_i32_e32 v2, vcc, 1, v4
-; CI-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; CI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[4:5]
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; CI-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; CI-NEXT: s_add_u32 s2, s2, 32
+; CI-NEXT: s_addc_u32 s3, s3, 0
+; CI-NEXT: v_mov_b32_e32 v2, s2
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: v_mov_b32_e32 v3, s3
+; CI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
-; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; CI-NEXT: s_cbranch_execnz .LBB19_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
-; CI-NEXT: s_or_b64 exec, exec, s[2:3]
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v2, s0
+; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_inc_ret_i64_offset_system:
@@ -1967,180 +1757,77 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_system(ptr addrspace
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x20
-; VI-NEXT: s_add_u32 s6, s2, 32
-; VI-NEXT: s_addc_u32 s7, s3, 0
-; VI-NEXT: v_mov_b32_e32 v0, s6
-; VI-NEXT: s_mov_b64 s[2:3], 0
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s4
-; VI-NEXT: v_mov_b32_e32 v1, s7
-; VI-NEXT: v_mov_b32_e32 v3, s5
-; VI-NEXT: .LBB19_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_mov_b32_e32 v5, v3
-; VI-NEXT: v_mov_b32_e32 v4, v2
-; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v4
-; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; VI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[4:5]
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; VI-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; VI-NEXT: s_add_u32 s2, s2, 32
+; VI-NEXT: s_addc_u32 s3, s3, 0
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v3, s3
+; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
-; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; VI-NEXT: s_cbranch_execnz .LBB19_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[2:3]
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_inc_ret_i64_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x20
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s6
-; GFX9-NEXT: v_mov_b32_e32 v1, s7
-; GFX9-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-NEXT: v_mov_b32_e32 v5, v0
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 1, v5
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, 42, v[5:6]
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v0, 0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v4, v1, 0, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[2:3] offset:32 glc
+; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB19_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_inc_ret_i64_offset_system:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x20
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v0, s4
-; GFX10-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_mov_b32_e32 v6, v1
-; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v5, 1
-; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v6, vcc_lo
-; GFX10-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[5:6]
-; GFX10-NEXT: v_cndmask_b32_e64 v3, v0, 0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v1, 0, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[2:3] offset:32 glc
+; GFX10-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[5:6]
-; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: s_cbranch_execnz .LBB19_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_ret_i64_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 42
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b64 s[4:5], s[2:3], 0x20
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v5, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v6, vcc_lo
-; GFX11-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[5:6]
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v0, 0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e64 v4, v1, 0, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b64 v[0:1], v2, v[3:6], s[2:3] offset:32 glc
+; GFX11-NEXT: global_atomic_inc_u64 v[0:1], v2, v[0:1], s[2:3] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[5:6]
-; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_execnz .LBB19_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: global_atomic_inc_ret_i64_offset_system:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_load_b64 s[4:5], s[2:3], 0x20
-; GFX12-NEXT: v_mov_b32_e32 v2, 0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: .LBB19_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v5, 1
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v6, vcc_lo
-; GFX12-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[5:6]
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v0, 0, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_cndmask_b32_e64 v4, v1, 0, vcc_lo
+; GFX12-NEXT: v_mov_b32_e32 v0, 42
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b64 v[0:1], v2, v[3:6], s[2:3] offset:32 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_atomic_inc_u64 v[0:1], v2, v[0:1], s[2:3] offset:32 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[5:6]
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; GFX12-NEXT: s_cbranch_execnz .LBB19_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; GFX12-NEXT: v_mov_b32_e32 v2, 0
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out, align 4
ret void
}
@@ -2225,7 +1912,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64(ptr addrspace(1) %ptr) #1
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -2314,7 +2001,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(ptr addrspace(1) %
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -2324,34 +2011,17 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_system(ptr addrspa
; CI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x8
-; CI-NEXT: s_add_u32 s4, s0, 32
-; CI-NEXT: s_addc_u32 s5, s1, 0
-; CI-NEXT: v_mov_b32_e32 v4, s4
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v2, s2
-; CI-NEXT: v_mov_b32_e32 v5, s5
-; CI-NEXT: v_mov_b32_e32 v3, s3
-; CI-NEXT: .LBB22_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: v_add_i32_e32 v0, vcc, 1, v2
-; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; CI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[2:3]
-; CI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; CI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; CI-NEXT: s_add_u32 s0, s0, 32
+; CI-NEXT: s_addc_u32 s1, s1, 0
+; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: v_mov_b32_e32 v2, s0
+; CI-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; CI-NEXT: v_mov_b32_e32 v3, v1
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v2, v0
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB22_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: global_atomic_inc_noret_i64_offset_system:
@@ -2359,163 +2029,70 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_system(ptr addrspa
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x20
-; VI-NEXT: s_add_u32 s4, s0, 32
-; VI-NEXT: s_addc_u32 s5, s1, 0
-; VI-NEXT: v_mov_b32_e32 v4, s4
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: .LBB22_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v2
-; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[2:3]
-; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; VI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT: s_add_u32 s0, s0, 32
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, v1
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v2, v0
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB22_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_inc_noret_i64_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-NEXT: v_mov_b32_e32 v3, s5
-; GFX9-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 1, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v3, vcc
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, 42, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: global_atomic_inc_x2 v2, v[0:1], s[0:1] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v2, v0
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_execnz .LBB22_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: global_atomic_inc_noret_i64_offset_system:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; GFX10-NEXT: v_mov_b32_e32 v4, 0
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x20
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, s2
-; GFX10-NEXT: v_mov_b32_e32 v3, s3
-; GFX10-NEXT: s_mov_b32 s2, 0
-; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v2, 1
-; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
-; GFX10-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[2:3]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
-; GFX10-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: global_atomic_inc_x2 v2, v[0:1], s[0:1] offset:32
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: s_cbranch_execnz .LBB22_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: global_atomic_inc_noret_i64_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 42
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x20
-; GFX11-NEXT: v_mov_b32_e32 v4, 0
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v2, 1
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[2:3]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
-; GFX11-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: global_atomic_inc_u64 v2, v[0:1], s[0:1] offset:32
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_execnz .LBB22_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: global_atomic_inc_noret_i64_offset_system:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_load_b64 s[2:3], s[0:1], 0x20
-; GFX12-NEXT: v_mov_b32_e32 v4, 0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX12-NEXT: s_mov_b32 s2, 0
-; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v2, 1
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v3, vcc_lo
-; GFX12-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[2:3]
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX12-NEXT: v_mov_b32_e32 v0, 42
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] offset:32 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_atomic_inc_u64 v2, v[0:1], s[0:1] offset:32 scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX12-NEXT: s_cbranch_execnz .LBB22_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr addrspace(1) %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -2631,7 +2208,7 @@ define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(ptr addrspace
%gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id
%out.gep = getelementptr i64, ptr addrspace(1) %out, i32 %id
%gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
store i64 %result, ptr addrspace(1) %out.gep, align 4
ret void
}
@@ -2733,7 +2310,7 @@ define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(ptr addrspa
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr addrspace(1) %ptr, i32 %id
%gep = getelementptr i64, ptr addrspace(1) %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 42 syncscope("agent") seq_cst, align 8, !amdgpu.no.remote.memory !1
ret void
}
@@ -2837,7 +2414,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32(ptr %out, ptr %ptr) #1 {
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-NEXT: flat_store_b32 v[0:1], v2
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out, align 4
ret void
}
@@ -2949,7 +2526,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset(ptr %out, ptr %ptr) #1
; GFX12-NEXT: flat_store_b32 v[0:1], v2
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out, align 4
ret void
}
@@ -2961,29 +2538,15 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s2, s2, 16
; CI-NEXT: s_addc_u32 s3, s3, 0
; CI-NEXT: v_mov_b32_e32 v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: flat_load_dword v2, v[0:1]
-; CI-NEXT: s_mov_b64 s[2:3], 0
-; CI-NEXT: .LBB27_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v3, v2
-; CI-NEXT: v_add_i32_e32 v2, vcc, 1, v3
-; CI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; CI-NEXT: s_cbranch_execnz .LBB27_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
-; CI-NEXT: s_or_b64 exec, exec, s[2:3]
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_store_dword v[0:1], v2
@@ -2995,29 +2558,15 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: flat_load_dword v2, v[0:1]
-; VI-NEXT: s_mov_b64 s[2:3], 0
-; VI-NEXT: .LBB27_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v3, v2
-; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v3
-; VI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; VI-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; VI-NEXT: s_cbranch_execnz .LBB27_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
@@ -3028,27 +2577,13 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: flat_load_dword v2, v[0:1] offset:16
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
-; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-NEXT: v_add_u32_e32 v2, 1, v3
-; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX9-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] offset:16 glc
+; GFX9-NEXT: flat_atomic_inc v2, v[0:1], v2 offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_execnz .LBB27_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: flat_store_dword v[0:1], v2
@@ -3061,30 +2596,16 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v2, 42
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s2, s2, 16
; GFX10-NEXT: s_addc_u32 s3, s3, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
-; GFX10-NEXT: s_mov_b32 s2, 0
-; GFX10-NEXT: flat_load_dword v2, v[0:1]
-; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v3, v2
-; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v3
-; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v3
-; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
-; GFX10-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-NEXT: flat_atomic_inc v2, v[0:1], v2 glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: s_cbranch_execnz .LBB27_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: flat_store_dword v[0:1], v2
@@ -3093,29 +2614,13 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; GFX11-LABEL: flat_atomic_inc_ret_i32_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v2, 42
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: flat_load_b32 v2, v[0:1] offset:16
-; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 1, v3
-; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v3
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-NEXT: flat_atomic_inc_u32 v2, v[0:1], v2 offset:16 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_execnz .LBB27_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
@@ -3123,37 +2628,19 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_system(ptr %out, ptr %
; GFX12-LABEL: flat_atomic_inc_ret_i32_offset_system:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX12-NEXT: v_mov_b32_e32 v2, 42
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX12-NEXT: s_mov_b32 s2, 0
-; GFX12-NEXT: flat_load_b32 v2, v[0:1] offset:16
-; GFX12-NEXT: .LBB27_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_add_nc_u32_e32 v2, 1, v3
-; GFX12-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: flat_atomic_inc_u32 v2, v[0:1], v2 offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX12-NEXT: s_cbranch_execnz .LBB27_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s2
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-NEXT: flat_store_b32 v[0:1], v2
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out, align 4
ret void
}
@@ -3244,7 +2731,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32(ptr %ptr) #1 {
; GFX12-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %ptr, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -3341,7 +2828,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset(ptr %ptr) #1 {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -3352,28 +2839,15 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_system(ptr %ptr) #1
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CI-NEXT: v_mov_b32_e32 v2, 42
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_add_u32 s0, s0, 16
; CI-NEXT: s_addc_u32 s1, s1, 0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_load_dword v3, v[0:1]
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: .LBB30_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: v_add_i32_e32 v2, vcc, 1, v3
-; CI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; CI-NEXT: flat_atomic_inc v[0:1], v2
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v3, v2
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB30_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_inc_noret_i32_offset_system:
@@ -3382,28 +2856,15 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_system(ptr %ptr) #1
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; VI-NEXT: v_mov_b32_e32 v2, 42
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_load_dword v3, v[0:1]
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: .LBB30_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v3
-; VI-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; VI-NEXT: flat_atomic_inc v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v3, v2
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB30_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_inc_noret_i32_offset_system:
@@ -3411,26 +2872,13 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_system(ptr %ptr) #1
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 42
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GFX9-NEXT: s_mov_b64 s[0:1], 0
-; GFX9-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v2, 1, v3
-; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 42, v3
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX9-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] offset:16 glc
+; GFX9-NEXT: flat_atomic_inc v[0:1], v2 offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
-; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT: s_cbranch_execnz .LBB30_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: flat_atomic_inc_noret_i32_offset_system:
@@ -3440,89 +2888,46 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_system(ptr %ptr) #1
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v2, 42
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 16
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: s_mov_b32 s0, 0
-; GFX10-NEXT: flat_load_dword v3, v[0:1]
-; GFX10-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v2, 1, v3
-; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v3
-; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
-; GFX10-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: flat_atomic_inc v[0:1], v2
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v2
-; GFX10-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execnz .LBB30_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i32_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v2, 42
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b32 v3, v[0:1] offset:16
-; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v2, 1, v3
-; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
-; GFX11-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: flat_atomic_inc_u32 v[0:1], v2 offset:16
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, v2
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB30_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: flat_atomic_inc_noret_i32_offset_system:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX12-NEXT: v_mov_b32_e32 v2, 42
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b32 v3, v[0:1] offset:16
-; GFX12-NEXT: .LBB30_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v2, 1, v3
-; GFX12-NEXT: v_cmp_le_u32_e32 vcc_lo, 42, v3
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: flat_atomic_inc_u32 v[0:1], v2 offset:16 scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
-; GFX12-NEXT: v_mov_b32_e32 v3, v2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB30_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX12-NEXT: s_endpgm
%gep = getelementptr i32, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i32 42 seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
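
The no-return variants are the same IR shape with the result left unused; the
GFX12 checks above show the selected flat_atomic_inc_u32 dropping
th:TH_ATOMIC_RETURN and waiting only on store counts. A sketch, again with a
hypothetical kernel name:

define amdgpu_kernel void @flat_inc_i32_noret_sketch(ptr %ptr) {
  %gep = getelementptr i32, ptr %ptr, i32 4
  ; Result unused, so the backend is free to pick the no-return encoding.
  %unused = atomicrmw uinc_wrap ptr %gep, i32 42 seq_cst, align 4, !amdgpu.no.remote.memory !1
  ret void
}

!1 = !{}
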
@@ -3672,7 +3077,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(ptr %out, ptr %
%gep.tid = getelementptr i32, ptr %ptr, i32 %id
%out.gep = getelementptr i32, ptr %out, i32 %id
%gep = getelementptr i32, ptr %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
store i32 %result, ptr %out.gep, align 4
ret void
}
@@ -3796,7 +3201,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(ptr %ptr) #1
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i32, ptr %ptr, i32 %id
%gep = getelementptr i32, ptr %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4
+ %result = atomicrmw uinc_wrap ptr %gep, i32 42 syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !1
ret void
}
@@ -4038,7 +3443,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i64 %result, ptr %out, align 4
ret void
}
@@ -4166,7 +3571,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i64 %result, ptr %out, align 4
ret void
}
@@ -4177,46 +3582,25 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_system(ptr %out, ptr %
; CI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_add_u32 s4, s2, 32
-; CI-NEXT: s_addc_u32 s5, s3, 0
-; CI-NEXT: s_add_u32 s2, s2, 36
+; CI-NEXT: s_add_u32 s2, s2, 32
; CI-NEXT: s_addc_u32 s3, s3, 0
-; CI-NEXT: v_mov_b32_e32 v0, s4
-; CI-NEXT: v_mov_b32_e32 v4, s3
-; CI-NEXT: v_mov_b32_e32 v1, s5
-; CI-NEXT: v_mov_b32_e32 v3, s2
-; CI-NEXT: flat_load_dword v2, v[0:1]
-; CI-NEXT: flat_load_dword v3, v[3:4]
-; CI-NEXT: s_mov_b64 s[2:3], 0
-; CI-NEXT: .LBB36_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v5, v3
-; CI-NEXT: v_mov_b32_e32 v4, v2
-; CI-NEXT: v_add_i32_e32 v2, vcc, 1, v4
-; CI-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; CI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[4:5]
-; CI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; CI-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; CI-NEXT: v_mov_b32_e32 v2, s2
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: v_mov_b32_e32 v3, s3
+; CI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
-; CI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; CI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; CI-NEXT: s_cbranch_execnz .LBB36_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
-; CI-NEXT: s_or_b64 exec, exec, s[2:3]
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: s_add_u32 s0, s0, 4
-; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_addc_u32 s1, s1, 0
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_store_dword v[0:1], v3
+; CI-NEXT: v_mov_b32_e32 v5, s1
+; CI-NEXT: v_mov_b32_e32 v4, s0
+; CI-NEXT: flat_store_dword v[2:3], v0
+; CI-NEXT: flat_store_dword v[4:5], v1
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_inc_ret_i64_offset_system:
@@ -4224,80 +3608,43 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_system(ptr %out, ptr %
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_add_u32 s4, s2, 32
-; VI-NEXT: s_addc_u32 s5, s3, 0
-; VI-NEXT: s_add_u32 s2, s2, 36
+; VI-NEXT: s_add_u32 s2, s2, 32
; VI-NEXT: s_addc_u32 s3, s3, 0
-; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: v_mov_b32_e32 v4, s3
-; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: v_mov_b32_e32 v3, s2
-; VI-NEXT: flat_load_dword v2, v[0:1]
-; VI-NEXT: flat_load_dword v3, v[3:4]
-; VI-NEXT: s_mov_b64 s[2:3], 0
-; VI-NEXT: .LBB36_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v5, v3
-; VI-NEXT: v_mov_b32_e32 v4, v2
-; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v4
-; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; VI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[4:5]
-; VI-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; VI-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v3, s3
+; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
-; VI-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; VI-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; VI-NEXT: s_cbranch_execnz .LBB36_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[2:3]
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_add_u32 s0, s0, 4
-; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_addc_u32 s1, s1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: v_mov_b32_e32 v5, s1
+; VI-NEXT: v_mov_b32_e32 v4, s0
+; VI-NEXT: flat_store_dword v[2:3], v0
+; VI-NEXT: flat_store_dword v[4:5], v1
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_inc_ret_i64_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s12, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: flat_load_dwordx2 v[2:3], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
-; GFX9-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v5, v3
-; GFX9-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 1, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v5, vcc
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, 42, v[4:5]
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] offset:32 glc
+; GFX9-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
-; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_execnz .LBB36_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: flat_atomic_inc_ret_i64_offset_system:
@@ -4307,110 +3654,54 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_system(ptr %out, ptr %
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s2, s2, 32
; GFX10-NEXT: s_addc_u32 s3, s3, 0
-; GFX10-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-NEXT: v_mov_b32_e32 v1, s3
-; GFX10-NEXT: s_mov_b32 s2, 0
-; GFX10-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
-; GFX10-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v5, v3
-; GFX10-NEXT: v_mov_b32_e32 v4, v2
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v4, 1
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v5, vcc_lo
-; GFX10-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[4:5]
-; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
-; GFX10-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
+; GFX10-NEXT: v_mov_b32_e32 v2, s2
+; GFX10-NEXT: v_mov_b32_e32 v3, s3
+; GFX10-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
-; GFX10-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: s_cbranch_execnz .LBB36_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX10-NEXT: v_mov_b32_e32 v0, s0
-; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v3, s1
+; GFX10-NEXT: v_mov_b32_e32 v2, s0
+; GFX10-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_ret_i64_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 42
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: flat_load_b64 v[2:3], v[0:1] offset:32
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v4, 1
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v5, vcc_lo
-; GFX11-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
-; GFX11-NEXT: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] offset:32 glc
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_mov_b32_e32 v3, s3
+; GFX11-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3], v[0:1] offset:32 glc
; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
-; GFX11-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_execnz .LBB36_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: flat_store_b64 v[0:1], v[2:3]
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: flat_atomic_inc_ret_i64_offset_system:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
+; GFX12-NEXT: v_mov_b32_e32 v0, 42
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX12-NEXT: s_mov_b32 s2, 0
-; GFX12-NEXT: flat_load_b64 v[2:3], v[0:1] offset:32
-; GFX12-NEXT: .LBB36_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v4, 1
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v5, vcc_lo
-; GFX12-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[4:5]
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
+; GFX12-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s2
+; GFX12-NEXT: v_mov_b32_e32 v3, s3
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5] offset:32 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX12-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX12-NEXT: s_cbranch_execnz .LBB36_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX12-NEXT: flat_store_b64 v[0:1], v[2:3]
+; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i64 %result, ptr %out, align 4
ret void
}
@@ -4507,7 +3798,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64(ptr %ptr) #1 {
; GFX12-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
@@ -4610,7 +3901,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(ptr %ptr) #1 {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
@@ -4620,37 +3911,17 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_system(ptr %ptr) #1
; CI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CI-NEXT: s_add_i32 s12, s12, s17
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CI-NEXT: v_mov_b32_e32 v0, 42
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_add_u32 s2, s0, 32
-; CI-NEXT: s_addc_u32 s3, s1, 0
-; CI-NEXT: v_mov_b32_e32 v5, s3
-; CI-NEXT: s_add_u32 s0, s0, 36
-; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: s_add_u32 s0, s0, 32
; CI-NEXT: s_addc_u32 s1, s1, 0
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: flat_load_dword v2, v[4:5]
-; CI-NEXT: flat_load_dword v3, v[0:1]
-; CI-NEXT: s_mov_b64 s[0:1], 0
-; CI-NEXT: .LBB39_1: ; %atomicrmw.start
-; CI-NEXT: ; =>This Inner Loop Header: Depth=1
-; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CI-NEXT: v_add_i32_e32 v0, vcc, 1, v2
-; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; CI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[2:3]
-; CI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; CI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; CI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v1, 0
+; CI-NEXT: v_mov_b32_e32 v2, s0
+; CI-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CI-NEXT: buffer_wbinvl1_vol
-; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; CI-NEXT: v_mov_b32_e32 v3, v1
-; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; CI-NEXT: v_mov_b32_e32 v2, v0
-; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; CI-NEXT: s_cbranch_execnz .LBB39_1
-; CI-NEXT: ; %bb.2: ; %atomicrmw.end
; CI-NEXT: s_endpgm
;
; VI-LABEL: flat_atomic_inc_noret_i64_offset_system:
@@ -4658,67 +3929,32 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_system(ptr %ptr) #1
; VI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; VI-NEXT: s_add_i32 s12, s12, s17
; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; VI-NEXT: v_mov_b32_e32 v0, 42
; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_add_u32 s2, s0, 32
-; VI-NEXT: s_addc_u32 s3, s1, 0
-; VI-NEXT: v_mov_b32_e32 v5, s3
-; VI-NEXT: s_add_u32 s0, s0, 36
-; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: flat_load_dword v2, v[4:5]
-; VI-NEXT: flat_load_dword v3, v[0:1]
-; VI-NEXT: s_mov_b64 s[0:1], 0
-; VI-NEXT: .LBB39_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v2
-; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT: v_cmp_le_u64_e32 vcc, 42, v[2:3]
-; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; VI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, v1
-; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; VI-NEXT: v_mov_b32_e32 v2, v0
-; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; VI-NEXT: s_cbranch_execnz .LBB39_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: flat_atomic_inc_noret_i64_offset_system:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; GFX9-NEXT: s_add_u32 flat_scratch_lo, s12, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, 42
; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v5, s1
-; GFX9-NEXT: v_mov_b32_e32 v4, s0
-; GFX9-NEXT: flat_load_dwordx2 v[2:3], v[4:5] offset:32
-; GFX9-NEXT: s_mov_b64 s[0:1], 0
-; GFX9-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 1, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v3, vcc
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, 42, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] offset:32 glc
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, v0
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; GFX9-NEXT: s_cbranch_execnz .LBB39_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: flat_atomic_inc_noret_i64_offset_system:
@@ -4728,99 +3964,49 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_system(ptr %ptr) #1
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v0, 42
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s0, 32
; GFX10-NEXT: s_addc_u32 s1, s1, 0
-; GFX10-NEXT: v_mov_b32_e32 v5, s1
-; GFX10-NEXT: v_mov_b32_e32 v4, s0
-; GFX10-NEXT: s_mov_b32 s0, 0
-; GFX10-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
-; GFX10-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v2, 1
-; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
-; GFX10-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[2:3]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
-; GFX10-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
-; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_mov_b32_e32 v3, s1
+; GFX10-NEXT: v_mov_b32_e32 v2, s0
+; GFX10-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
-; GFX10-NEXT: s_cbranch_execnz .LBB39_1
-; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: flat_atomic_inc_noret_i64_offset_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v0, 42
+; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: flat_load_b64 v[2:3], v[4:5] offset:32
-; GFX11-NEXT: .p2align 6
-; GFX11-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v2, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v3, vcc_lo
-; GFX11-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[2:3]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
-; GFX11-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:5], v[0:3] offset:32 glc
-; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: flat_atomic_inc_u64 v[2:3], v[0:1] offset:32
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_execnz .LBB39_1
-; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX11-NEXT: s_endpgm
;
; GFX12-LABEL: flat_atomic_inc_noret_i64_offset_system:
; GFX12: ; %bb.0:
; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX12-NEXT: v_mov_b32_e32 v0, 42
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
-; GFX12-NEXT: s_mov_b32 s0, 0
-; GFX12-NEXT: flat_load_b64 v[2:3], v[4:5] offset:32
-; GFX12-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v2, 1
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v3, vcc_lo
-; GFX12-NEXT: v_cmp_le_u64_e32 vcc_lo, 42, v[2:3]
-; GFX12-NEXT: s_wait_alu 0xfffd
-; GFX12-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX12-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: global_wb scope:SCOPE_SYS
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:5], v[0:3] offset:32 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: flat_atomic_inc_u64 v[2:3], v[0:1] offset:32 scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SYS
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB39_1
-; GFX12-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX12-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
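
For the flat i64 cases two annotations combine: the tests already carried
!noalias.addrspace, and now also take !amdgpu.no.remote.memory. A sketch under
those assumptions (the node definitions are the ones at the end of this file):

define amdgpu_kernel void @flat_inc_i64_sketch(ptr %ptr) {
  %gep = getelementptr i64, ptr %ptr, i32 4
  ; !0 = !{i32 5, i32 6} encodes the half-open range [5, 6): the flat pointer
  ; is known not to address address space 5 (private). !1 is the empty marker.
  %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
  ret void
}

!0 = !{i32 5, i32 6}
!1 = !{}
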
@@ -4982,7 +4168,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(ptr %out, ptr %
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%out.gep = getelementptr i64, ptr %out, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
store i64 %result, ptr %out.gep, align 4
ret void
}
@@ -5110,7 +4296,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(ptr %ptr) #1
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0, !amdgpu.no.remote.memory !1
ret void
}
@@ -5246,6 +4432,7 @@ attributes #1 = { nounwind }
attributes #2 = { nounwind memory(none) }
!0 = !{i32 5, i32 6}
+!1 = !{}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/acc-ldst.ll b/llvm/test/CodeGen/AMDGPU/acc-ldst.ll
index 726bfbab7ad48..45c06cae585cc 100644
--- a/llvm/test/CodeGen/AMDGPU/acc-ldst.ll
+++ b/llvm/test/CodeGen/AMDGPU/acc-ldst.ll
@@ -203,7 +203,7 @@ define amdgpu_kernel void @test_atomic_mfma_4xi32_atomic_store(ptr addrspace(1)
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds i32, ptr addrspace(1) %arg, i32 %tid
- %in.1 = atomicrmw volatile sub ptr addrspace(1) %gep, i32 1 syncscope("agent") seq_cst
+ %in.1 = atomicrmw volatile sub ptr addrspace(1) %gep, i32 1 syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
%tmp0 = insertelement <4 x i32> poison, i32 %in.1, i32 0
%tmp1 = insertelement <4 x i32> %tmp0, i32 0, i32 1
%tmp2 = insertelement <4 x i32> %tmp1, i32 0, i32 2
@@ -229,7 +229,7 @@ define amdgpu_kernel void @test_atomic_mfma_4xi32_atomic64_store(ptr addrspace(1
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %tid
- %in.1 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 1 syncscope("agent") seq_cst
+ %in.1 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 1 syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
%tmp0 = insertelement <2 x i64> poison, i64 %in.1, i32 0
%tmp1 = insertelement <2 x i64> %tmp0, i64 0, i32 1
%tmp2 = bitcast <2 x i64> %tmp1 to <4 x i32>
@@ -319,3 +319,5 @@ exit:
}
attributes #0 = { "amdgpu-flat-work-group-size"="1,256" }
+
+!0 = !{}
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 53469b7f6f100..e2c076bea552f 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -328,7 +328,7 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw add ptr addrspace(1) %inout, i32 5 syncscope("agent") acq_rel
+ %old = atomicrmw add ptr addrspace(1) %inout, i32 5 syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i32 %old, ptr addrspace(1) %out
ret void
}
@@ -655,7 +655,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw add ptr addrspace(1) %inout, i32 %additive syncscope("agent") acq_rel
+ %old = atomicrmw add ptr addrspace(1) %inout, i32 %additive syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i32 %old, ptr addrspace(1) %out
ret void
}
@@ -1565,7 +1565,7 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_DPP-NEXT: s_endpgm
entry:
%lane = call i32 @llvm.amdgcn.workitem.id.x()
- %old = atomicrmw add ptr addrspace(1) %inout, i32 %lane syncscope("agent") acq_rel
+ %old = atomicrmw add ptr addrspace(1) %inout, i32 %lane syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i32 %old, ptr addrspace(1) %out
ret void
}
@@ -1899,7 +1899,7 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw add ptr addrspace(1) %inout, i64 5 syncscope("agent") acq_rel
+ %old = atomicrmw add ptr addrspace(1) %inout, i64 5 syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i64 %old, ptr addrspace(1) %out
ret void
}
@@ -2284,7 +2284,7 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw add ptr addrspace(1) %inout, i64 %additive syncscope("agent") acq_rel
+ %old = atomicrmw add ptr addrspace(1) %inout, i64 %additive syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i64 %old, ptr addrspace(1) %out
ret void
}
@@ -3543,7 +3543,7 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
entry:
%lane = call i32 @llvm.amdgcn.workitem.id.x()
%zext = zext i32 %lane to i64
- %old = atomicrmw add ptr addrspace(1) %inout, i64 %zext syncscope("agent") acq_rel
+ %old = atomicrmw add ptr addrspace(1) %inout, i64 %zext syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i64 %old, ptr addrspace(1) %out
ret void
}
@@ -3859,7 +3859,7 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw sub ptr addrspace(1) %inout, i32 5 syncscope("agent") acq_rel
+ %old = atomicrmw sub ptr addrspace(1) %inout, i32 5 syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i32 %old, ptr addrspace(1) %out
ret void
}
@@ -4188,7 +4188,7 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw sub ptr addrspace(1) %inout, i32 %subitive syncscope("agent") acq_rel
+ %old = atomicrmw sub ptr addrspace(1) %inout, i32 %subitive syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i32 %old, ptr addrspace(1) %out
ret void
}
@@ -5098,7 +5098,7 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_DPP-NEXT: s_endpgm
entry:
%lane = call i32 @llvm.amdgcn.workitem.id.x()
- %old = atomicrmw sub ptr addrspace(1) %inout, i32 %lane syncscope("agent") acq_rel
+ %old = atomicrmw sub ptr addrspace(1) %inout, i32 %lane syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i32 %old, ptr addrspace(1) %out
ret void
}
@@ -5452,7 +5452,7 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw sub ptr addrspace(1) %inout, i64 5 syncscope("agent") acq_rel
+ %old = atomicrmw sub ptr addrspace(1) %inout, i64 5 syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i64 %old, ptr addrspace(1) %out
ret void
}
@@ -5848,7 +5848,7 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
; GFX1232-NEXT: s_endpgm
entry:
- %old = atomicrmw sub ptr addrspace(1) %inout, i64 %subitive syncscope("agent") acq_rel
+ %old = atomicrmw sub ptr addrspace(1) %inout, i64 %subitive syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i64 %old, ptr addrspace(1) %out
ret void
}
@@ -7107,7 +7107,7 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
entry:
%lane = call i32 @llvm.amdgcn.workitem.id.x()
%zext = zext i32 %lane to i64
- %old = atomicrmw sub ptr addrspace(1) %inout, i64 %zext syncscope("agent") acq_rel
+ %old = atomicrmw sub ptr addrspace(1) %inout, i64 %zext syncscope("agent") acq_rel, !amdgpu.no.fine.grained.memory !0
store i64 %old, ptr addrspace(1) %out
ret void
}
@@ -7115,720 +7115,508 @@ entry:
define amdgpu_kernel void @uniform_or_i8(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i8 %val) {
; GFX7LESS-LABEL: uniform_or_i8:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9
-; GFX7LESS-NEXT: s_load_dword s12, s[4:5], 0xd
+; GFX7LESS-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7LESS-NEXT: s_load_dword s6, s[4:5], 0xd
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX7LESS-NEXT: ; implicit-def: $vgpr0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX7LESS-NEXT: s_cbranch_execz .LBB12_4
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB12_2
; GFX7LESS-NEXT: ; %bb.1:
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_and_b32 s4, s10, -4
-; GFX7LESS-NEXT: s_mov_b32 s5, s11
-; GFX7LESS-NEXT: s_and_b32 s0, s10, 3
-; GFX7LESS-NEXT: s_and_b32 s1, s12, 0xff
-; GFX7LESS-NEXT: s_load_dword s6, s[4:5], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], 0
-; GFX7LESS-NEXT: s_mov_b32 s7, 0xf000
-; GFX7LESS-NEXT: s_lshl_b32 s13, s0, 3
-; GFX7LESS-NEXT: s_lshl_b32 s14, s1, s13
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s6
-; GFX7LESS-NEXT: s_mov_b32 s6, -1
-; GFX7LESS-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, v1
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, v0
-; GFX7LESS-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX7LESS-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, v2
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX7LESS-NEXT: s_cbranch_execnz .LBB12_2
-; GFX7LESS-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX7LESS-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX7LESS-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX7LESS-NEXT: .LBB12_4: ; %Flow
-; GFX7LESS-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_and_b32 s8, s2, -4
; GFX7LESS-NEXT: s_mov_b32 s11, 0xf000
+; GFX7LESS-NEXT: s_and_b32 s2, s2, 3
+; GFX7LESS-NEXT: s_lshl_b32 s2, s2, 3
+; GFX7LESS-NEXT: s_and_b32 s7, s6, 0xff
+; GFX7LESS-NEXT: s_lshl_b32 s7, s7, s2
; GFX7LESS-NEXT: s_mov_b32 s10, -1
+; GFX7LESS-NEXT: s_mov_b32 s9, s3
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s7
+; GFX7LESS-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX7LESS-NEXT: v_lshrrev_b32_e32 v0, s2, v0
+; GFX7LESS-NEXT: .LBB12_2:
+; GFX7LESS-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s12
-; GFX7LESS-NEXT: v_readfirstlane_b32 s0, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s6
+; GFX7LESS-NEXT: v_readfirstlane_b32 s4, v0
; GFX7LESS-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
-; GFX7LESS-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX7LESS-NEXT: buffer_store_byte v0, off, s[8:11], 0
+; GFX7LESS-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX7LESS-NEXT: buffer_store_byte v0, off, s[0:3], 0
; GFX7LESS-NEXT: s_endpgm
;
; GFX8-LABEL: uniform_or_i8:
; GFX8: ; %bb.0:
-; GFX8-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX8-NEXT: s_load_dword s12, s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0
-; GFX8-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB12_4
+; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX8-NEXT: s_cbranch_execz .LBB12_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_and_b32 s4, s10, -4
-; GFX8-NEXT: s_mov_b32 s5, s11
-; GFX8-NEXT: s_load_dword s1, s[4:5], 0x0
-; GFX8-NEXT: s_and_b32 s0, s10, 3
-; GFX8-NEXT: s_lshl_b32 s13, s0, 3
-; GFX8-NEXT: s_and_b32 s0, s12, 0xff
-; GFX8-NEXT: s_lshl_b32 s14, s0, s13
-; GFX8-NEXT: s_mov_b64 s[10:11], 0
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
-; GFX8-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX8-NEXT: s_and_b32 s8, s2, -4
+; GFX8-NEXT: s_and_b32 s2, s2, 3
+; GFX8-NEXT: s_mov_b32 s9, s3
+; GFX8-NEXT: s_lshl_b32 s2, s2, 3
+; GFX8-NEXT: s_and_b32 s3, s6, 0xff
+; GFX8-NEXT: s_lshl_b32 s3, s3, s2
+; GFX8-NEXT: s_mov_b32 s11, 0xf000
+; GFX8-NEXT: s_mov_b32 s10, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s3
+; GFX8-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX8-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX8-NEXT: s_cbranch_execnz .LBB12_2
-; GFX8-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX8-NEXT: .LBB12_4: ; %Flow
-; GFX8-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, s2, v0
+; GFX8-NEXT: .LBB12_2:
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s4, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s12
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX8-NEXT: s_mov_b32 s11, 0xf000
-; GFX8-NEXT: s_mov_b32 s10, -1
-; GFX8-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX8-NEXT: buffer_store_byte v0, off, s[8:11], 0
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: buffer_store_byte v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: uniform_or_i8:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-NEXT: s_load_dword s12, s[4:5], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB12_4
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB12_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_and_b32 s4, s10, -4
-; GFX9-NEXT: s_mov_b32 s5, s11
-; GFX9-NEXT: s_load_dword s1, s[4:5], 0x0
-; GFX9-NEXT: s_and_b32 s0, s10, 3
-; GFX9-NEXT: s_lshl_b32 s13, s0, 3
-; GFX9-NEXT: s_and_b32 s0, s12, 0xff
-; GFX9-NEXT: s_lshl_b32 s14, s0, s13
-; GFX9-NEXT: s_mov_b64 s[10:11], 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
-; GFX9-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX9-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: v_mov_b32_e32 v2, v0
-; GFX9-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX9-NEXT: s_and_b32 s8, s2, -4
+; GFX9-NEXT: s_and_b32 s2, s2, 3
+; GFX9-NEXT: s_mov_b32 s9, s3
+; GFX9-NEXT: s_lshl_b32 s2, s2, 3
+; GFX9-NEXT: s_and_b32 s3, s6, 0xff
+; GFX9-NEXT: s_lshl_b32 s3, s3, s2
+; GFX9-NEXT: s_mov_b32 s11, 0xf000
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX9-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX9-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX9-NEXT: s_cbranch_execnz .LBB12_2
-; GFX9-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX9-NEXT: .LBB12_4: ; %Flow
-; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, s2, v0
+; GFX9-NEXT: .LBB12_2:
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_and_b32_e32 v0, 0xff, v0
-; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX9-NEXT: s_mov_b32 s11, 0xf000
-; GFX9-NEXT: s_mov_b32 s10, -1
-; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX9-NEXT: buffer_store_byte v0, off, s[8:11], 0
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX9-NEXT: buffer_store_byte v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: uniform_or_i8:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX1064-NEXT: s_load_dword s12, s[4:5], 0x34
+; GFX1064-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1064-NEXT: ; implicit-def: $vgpr0
-; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1064-NEXT: s_cbranch_execz .LBB12_4
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB12_2
; GFX1064-NEXT: ; %bb.1:
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_and_b32 s4, s10, -4
-; GFX1064-NEXT: s_mov_b32 s5, s11
-; GFX1064-NEXT: s_and_b32 s1, s10, 3
-; GFX1064-NEXT: s_load_dword s0, s[4:5], 0x0
-; GFX1064-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1064-NEXT: s_and_b32 s1, s12, 0xff
-; GFX1064-NEXT: s_mov_b64 s[10:11], 0
-; GFX1064-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1064-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1064-NEXT: s_mov_b32 s6, -1
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
-; GFX1064-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1064-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1064-NEXT: v_mov_b32_e32 v3, v1
-; GFX1064-NEXT: v_mov_b32_e32 v2, v0
-; GFX1064-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX1064-NEXT: s_and_b32 s7, s2, 3
+; GFX1064-NEXT: s_and_b32 s8, s6, 0xff
+; GFX1064-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1064-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1064-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1064-NEXT: s_and_b32 s8, s2, -4
+; GFX1064-NEXT: v_mov_b32_e32 v0, s9
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s9, s3
+; GFX1064-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1064-NEXT: v_mov_b32_e32 v1, v2
-; GFX1064-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX1064-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1064-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1064-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1064-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1064-NEXT: .LBB12_4: ; %Flow
-; GFX1064-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1064-NEXT: .LBB12_2:
+; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1064-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1064-NEXT: s_mov_b32 s10, -1
-; GFX1064-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT: v_cndmask_b32_e64 v0, s12, 0, vcc
-; GFX1064-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1064-NEXT: buffer_store_byte v0, off, s[8:11], 0
+; GFX1064-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1064-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1064-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc
+; GFX1064-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1064-NEXT: s_mov_b32 s2, -1
+; GFX1064-NEXT: buffer_store_byte v0, off, s[0:3], 0
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: uniform_or_i8:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX1032-NEXT: s_load_dword s1, s[4:5], 0x34
+; GFX1032-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s3, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: ; implicit-def: $vgpr0
-; GFX1032-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1032-NEXT: s_cbranch_execz .LBB12_4
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB12_2
; GFX1032-NEXT: ; %bb.1:
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_and_b32 s4, s10, -4
-; GFX1032-NEXT: s_mov_b32 s5, s11
-; GFX1032-NEXT: s_and_b32 s6, s10, 3
-; GFX1032-NEXT: s_load_dword s0, s[4:5], 0x0
-; GFX1032-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1032-NEXT: s_and_b32 s6, s1, 0xff
-; GFX1032-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1032-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1032-NEXT: s_mov_b32 s6, -1
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
-; GFX1032-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1032-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1032-NEXT: v_mov_b32_e32 v3, v1
-; GFX1032-NEXT: v_mov_b32_e32 v2, v0
-; GFX1032-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX1032-NEXT: s_and_b32 s5, s2, 3
+; GFX1032-NEXT: s_and_b32 s7, s6, 0xff
+; GFX1032-NEXT: s_lshl_b32 s5, s5, 3
+; GFX1032-NEXT: s_and_b32 s8, s2, -4
+; GFX1032-NEXT: s_lshl_b32 s7, s7, s5
+; GFX1032-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1032-NEXT: v_mov_b32_e32 v0, s7
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s9, s3
+; GFX1032-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1032-NEXT: v_mov_b32_e32 v1, v2
-; GFX1032-NEXT: s_or_b32 s3, s0, s3
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
-; GFX1032-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1032-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1032-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1032-NEXT: .LBB12_4: ; %Flow
-; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: v_lshrrev_b32_e32 v0, s5, v0
+; GFX1032-NEXT: .LBB12_2:
+; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1032-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1032-NEXT: s_mov_b32 s10, -1
-; GFX1032-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1032-NEXT: v_cndmask_b32_e64 v0, s1, 0, vcc_lo
-; GFX1032-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1032-NEXT: buffer_store_byte v0, off, s[8:11], 0
+; GFX1032-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1032-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1032-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc_lo
+; GFX1032-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1032-NEXT: s_mov_b32 s2, -1
+; GFX1032-NEXT: buffer_store_byte v0, off, s[0:3], 0
; GFX1032-NEXT: s_endpgm
;
; GFX1164-TRUE16-LABEL: uniform_or_i8:
; GFX1164-TRUE16: ; %bb.0:
; GFX1164-TRUE16-NEXT: s_clause 0x1
-; GFX1164-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1164-TRUE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1164-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1164-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-TRUE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1164-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1164-TRUE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1164-TRUE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1164-TRUE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1164-TRUE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1164-TRUE16-NEXT: ; %bb.1:
; GFX1164-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1164-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1164-TRUE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1164-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1164-TRUE16-NEXT: s_mov_b32 s6, s12
-; GFX1164-TRUE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1164-TRUE16-NEXT: s_and_b32 s1, s6, 0xff
-; GFX1164-TRUE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1164-TRUE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1164-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1164-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1164-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-TRUE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1164-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1164-TRUE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1164-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1164-TRUE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1164-TRUE16-NEXT: s_mov_b32 s8, s6
+; GFX1164-TRUE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1164-TRUE16-NEXT: s_and_b32 s8, s8, 0xff
+; GFX1164-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1164-TRUE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1164-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1164-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1164-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1164-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1164-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-TRUE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1164-TRUE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1164-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-TRUE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1164-TRUE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1164-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1164-TRUE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1164-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1164-TRUE16-NEXT: .LBB12_4: ; %Flow
-; GFX1164-TRUE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1164-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1164-TRUE16-NEXT: .LBB12_2:
+; GFX1164-TRUE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1164-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1164-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1164-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1164-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1164-TRUE16-NEXT: v_cndmask_b16 v0.l, s12, 0, vcc
+; GFX1164-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1164-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1164-TRUE16-NEXT: v_cndmask_b16 v0.l, s6, 0, vcc
; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1164-TRUE16-NEXT: buffer_store_b8 v0, off, s[8:11], 0
+; GFX1164-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1164-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1164-TRUE16-NEXT: buffer_store_b8 v0, off, s[0:3], 0
; GFX1164-TRUE16-NEXT: s_endpgm
;
; GFX1164-FAKE16-LABEL: uniform_or_i8:
; GFX1164-FAKE16: ; %bb.0:
; GFX1164-FAKE16-NEXT: s_clause 0x1
-; GFX1164-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1164-FAKE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1164-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1164-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-FAKE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1164-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1164-FAKE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1164-FAKE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1164-FAKE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1164-FAKE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1164-FAKE16-NEXT: ; %bb.1:
; GFX1164-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1164-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1164-FAKE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1164-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1164-FAKE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1164-FAKE16-NEXT: s_and_b32 s1, s12, 0xff
-; GFX1164-FAKE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1164-FAKE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1164-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1164-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1164-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-FAKE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1164-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1164-FAKE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1164-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1164-FAKE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1164-FAKE16-NEXT: s_and_b32 s8, s6, 0xff
+; GFX1164-FAKE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1164-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1164-FAKE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1164-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1164-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1164-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1164-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1164-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-FAKE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1164-FAKE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1164-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-FAKE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1164-FAKE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1164-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1164-FAKE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1164-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1164-FAKE16-NEXT: .LBB12_4: ; %Flow
-; GFX1164-FAKE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1164-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1164-FAKE16-NEXT: .LBB12_2:
+; GFX1164-FAKE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1164-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1164-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1164-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1164-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1164-FAKE16-NEXT: v_cndmask_b32_e64 v0, s12, 0, vcc
+; GFX1164-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1164-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1164-FAKE16-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc
; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1164-FAKE16-NEXT: buffer_store_b8 v0, off, s[8:11], 0
+; GFX1164-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1164-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1164-FAKE16-NEXT: buffer_store_b8 v0, off, s[0:3], 0
; GFX1164-FAKE16-NEXT: s_endpgm
;
; GFX1132-TRUE16-LABEL: uniform_or_i8:
; GFX1132-TRUE16: ; %bb.0:
; GFX1132-TRUE16-NEXT: s_clause 0x1
-; GFX1132-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1132-TRUE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1132-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1132-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-TRUE16-NEXT: s_mov_b32 s3, 0
; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1132-TRUE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1132-TRUE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1132-TRUE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1132-TRUE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1132-TRUE16-NEXT: ; %bb.1:
; GFX1132-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1132-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1132-TRUE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1132-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1132-TRUE16-NEXT: s_mov_b32 s7, s1
-; GFX1132-TRUE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1132-TRUE16-NEXT: s_and_b32 s6, s7, 0xff
-; GFX1132-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1132-TRUE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1132-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1132-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1132-TRUE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1132-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-TRUE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1132-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1132-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1132-TRUE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1132-TRUE16-NEXT: s_mov_b32 s7, s4
+; GFX1132-TRUE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1132-TRUE16-NEXT: s_and_b32 s7, s7, 0xff
+; GFX1132-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1132-TRUE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1132-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1132-TRUE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1132-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1132-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1132-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1132-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1132-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1132-TRUE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1132-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1132-TRUE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1132-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1132-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1132-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1132-TRUE16-NEXT: .LBB12_4: ; %Flow
-; GFX1132-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1132-TRUE16-NEXT: .LBB12_2:
+; GFX1132-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1132-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1132-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1132-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1132-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1132-TRUE16-NEXT: v_cndmask_b16 v0.l, s1, 0, vcc_lo
+; GFX1132-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1132-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1132-TRUE16-NEXT: v_cndmask_b16 v0.l, s4, 0, vcc_lo
; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1132-TRUE16-NEXT: buffer_store_b8 v0, off, s[8:11], 0
+; GFX1132-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1132-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1132-TRUE16-NEXT: buffer_store_b8 v0, off, s[0:3], 0
; GFX1132-TRUE16-NEXT: s_endpgm
;
; GFX1132-FAKE16-LABEL: uniform_or_i8:
; GFX1132-FAKE16: ; %bb.0:
; GFX1132-FAKE16-NEXT: s_clause 0x1
-; GFX1132-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1132-FAKE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1132-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1132-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-FAKE16-NEXT: s_mov_b32 s3, 0
; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1132-FAKE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1132-FAKE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1132-FAKE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1132-FAKE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1132-FAKE16-NEXT: ; %bb.1:
; GFX1132-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1132-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1132-FAKE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1132-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1132-FAKE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1132-FAKE16-NEXT: s_and_b32 s6, s1, 0xff
-; GFX1132-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1132-FAKE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1132-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1132-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1132-FAKE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1132-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-FAKE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1132-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1132-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1132-FAKE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1132-FAKE16-NEXT: s_and_b32 s7, s4, 0xff
+; GFX1132-FAKE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1132-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1132-FAKE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1132-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1132-FAKE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1132-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1132-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1132-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1132-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-FAKE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1132-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1132-FAKE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1132-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1132-FAKE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1132-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1132-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1132-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1132-FAKE16-NEXT: .LBB12_4: ; %Flow
-; GFX1132-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1132-FAKE16-NEXT: .LBB12_2:
+; GFX1132-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1132-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1132-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1132-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1132-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1132-FAKE16-NEXT: v_cndmask_b32_e64 v0, s1, 0, vcc_lo
+; GFX1132-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1132-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1132-FAKE16-NEXT: v_cndmask_b32_e64 v0, s4, 0, vcc_lo
; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1132-FAKE16-NEXT: buffer_store_b8 v0, off, s[8:11], 0
+; GFX1132-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1132-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1132-FAKE16-NEXT: buffer_store_b8 v0, off, s[0:3], 0
; GFX1132-FAKE16-NEXT: s_endpgm
;
; GFX1264-TRUE16-LABEL: uniform_or_i8:
; GFX1264-TRUE16: ; %bb.0:
; GFX1264-TRUE16-NEXT: s_clause 0x1
-; GFX1264-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1264-TRUE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1264-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1264-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1264-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-TRUE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1264-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1264-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1264-TRUE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1264-TRUE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1264-TRUE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1264-TRUE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1264-TRUE16-NEXT: ; %bb.1:
; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1264-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1264-TRUE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1264-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1264-TRUE16-NEXT: s_mov_b32 s6, s12
-; GFX1264-TRUE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1264-TRUE16-NEXT: s_and_b32 s1, s6, 0xff
-; GFX1264-TRUE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1264-TRUE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1264-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1264-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1264-TRUE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1264-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-TRUE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1264-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1264-TRUE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1264-TRUE16-NEXT: s_mov_b32 s8, s6
+; GFX1264-TRUE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1264-TRUE16-NEXT: s_and_b32 s8, s8, 0xff
+; GFX1264-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1264-TRUE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1264-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1264-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1264-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1264-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1264-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1264-TRUE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1264-TRUE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1264-TRUE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1264-TRUE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1264-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1264-TRUE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1264-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1264-TRUE16-NEXT: .LBB12_4: ; %Flow
-; GFX1264-TRUE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1264-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1264-TRUE16-NEXT: .LBB12_2:
+; GFX1264-TRUE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1264-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1264-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1264-TRUE16-NEXT: v_cndmask_b16 v0.l, s12, 0, vcc
-; GFX1264-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX1264-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1264-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1264-TRUE16-NEXT: v_cndmask_b16 v0.l, s6, 0, vcc
; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1264-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1264-TRUE16-NEXT: buffer_store_b8 v0, off, s[8:11], null
+; GFX1264-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1264-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1264-TRUE16-NEXT: buffer_store_b8 v0, off, s[0:3], null
; GFX1264-TRUE16-NEXT: s_endpgm
;
; GFX1264-FAKE16-LABEL: uniform_or_i8:
; GFX1264-FAKE16: ; %bb.0:
; GFX1264-FAKE16-NEXT: s_clause 0x1
-; GFX1264-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1264-FAKE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1264-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1264-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1264-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-FAKE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1264-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1264-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1264-FAKE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1264-FAKE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1264-FAKE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1264-FAKE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1264-FAKE16-NEXT: ; %bb.1:
; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1264-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1264-FAKE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1264-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1264-FAKE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1264-FAKE16-NEXT: s_and_b32 s1, s12, 0xff
-; GFX1264-FAKE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1264-FAKE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1264-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1264-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1264-FAKE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1264-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-FAKE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1264-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1264-FAKE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1264-FAKE16-NEXT: s_and_b32 s8, s6, 0xff
+; GFX1264-FAKE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1264-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1264-FAKE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1264-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1264-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1264-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1264-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1264-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1264-FAKE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1264-FAKE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1264-FAKE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1264-FAKE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1264-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1264-FAKE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1264-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1264-FAKE16-NEXT: .LBB12_4: ; %Flow
-; GFX1264-FAKE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1264-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1264-FAKE16-NEXT: .LBB12_2:
+; GFX1264-FAKE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1264-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1264-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1264-FAKE16-NEXT: v_cndmask_b32_e64 v0, s12, 0, vcc
-; GFX1264-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX1264-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1264-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1264-FAKE16-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc
; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1264-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1264-FAKE16-NEXT: buffer_store_b8 v0, off, s[8:11], null
+; GFX1264-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1264-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1264-FAKE16-NEXT: buffer_store_b8 v0, off, s[0:3], null
; GFX1264-FAKE16-NEXT: s_endpgm
;
; GFX1232-TRUE16-LABEL: uniform_or_i8:
; GFX1232-TRUE16: ; %bb.0:
; GFX1232-TRUE16-NEXT: s_clause 0x1
-; GFX1232-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1232-TRUE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1232-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1232-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1232-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1232-TRUE16-NEXT: s_mov_b32 s3, 0
; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1232-TRUE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1232-TRUE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1232-TRUE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1232-TRUE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1232-TRUE16-NEXT: ; %bb.1:
; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1232-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1232-TRUE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1232-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1232-TRUE16-NEXT: s_mov_b32 s7, s1
-; GFX1232-TRUE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1232-TRUE16-NEXT: s_and_b32 s6, s7, 0xff
-; GFX1232-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1232-TRUE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1232-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1232-TRUE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1232-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1232-TRUE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1232-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1232-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1232-TRUE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1232-TRUE16-NEXT: s_mov_b32 s7, s4
+; GFX1232-TRUE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1232-TRUE16-NEXT: s_and_b32 s7, s7, 0xff
+; GFX1232-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1232-TRUE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1232-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1232-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1232-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1232-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1232-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1232-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1232-TRUE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1232-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1232-TRUE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1232-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1232-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1232-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1232-TRUE16-NEXT: .LBB12_4: ; %Flow
-; GFX1232-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1232-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1232-TRUE16-NEXT: .LBB12_2:
+; GFX1232-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1232-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1232-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1232-TRUE16-NEXT: v_cndmask_b16 v0.l, s1, 0, vcc_lo
-; GFX1232-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX1232-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1232-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1232-TRUE16-NEXT: v_cndmask_b16 v0.l, s4, 0, vcc_lo
; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1232-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1232-TRUE16-NEXT: buffer_store_b8 v0, off, s[8:11], null
+; GFX1232-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1232-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1232-TRUE16-NEXT: buffer_store_b8 v0, off, s[0:3], null
; GFX1232-TRUE16-NEXT: s_endpgm
;
; GFX1232-FAKE16-LABEL: uniform_or_i8:
; GFX1232-FAKE16: ; %bb.0:
; GFX1232-FAKE16-NEXT: s_clause 0x1
-; GFX1232-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1232-FAKE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1232-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1232-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1232-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1232-FAKE16-NEXT: s_mov_b32 s3, 0
; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1232-FAKE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1232-FAKE16-NEXT: s_cbranch_execz .LBB12_4
+; GFX1232-FAKE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1232-FAKE16-NEXT: s_cbranch_execz .LBB12_2
; GFX1232-FAKE16-NEXT: ; %bb.1:
; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1232-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1232-FAKE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1232-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1232-FAKE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1232-FAKE16-NEXT: s_and_b32 s6, s1, 0xff
-; GFX1232-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1232-FAKE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1232-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1232-FAKE16-NEXT: .LBB12_2: ; %atomicrmw.start
-; GFX1232-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1232-FAKE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1232-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1232-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1232-FAKE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1232-FAKE16-NEXT: s_and_b32 s7, s4, 0xff
+; GFX1232-FAKE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1232-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1232-FAKE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1232-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1232-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1232-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1232-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1232-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1232-FAKE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1232-FAKE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1232-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1232-FAKE16-NEXT: s_cbranch_execnz .LBB12_2
-; GFX1232-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1232-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1232-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1232-FAKE16-NEXT: .LBB12_4: ; %Flow
-; GFX1232-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1232-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1232-FAKE16-NEXT: .LBB12_2:
+; GFX1232-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1232-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1232-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1232-FAKE16-NEXT: v_cndmask_b32_e64 v0, s1, 0, vcc_lo
-; GFX1232-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX1232-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1232-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1232-FAKE16-NEXT: v_cndmask_b32_e64 v0, s4, 0, vcc_lo
; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1232-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1232-FAKE16-NEXT: buffer_store_b8 v0, off, s[8:11], null
+; GFX1232-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1232-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1232-FAKE16-NEXT: buffer_store_b8 v0, off, s[0:3], null
; GFX1232-FAKE16-NEXT: s_endpgm
- %rmw = atomicrmw or ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1
+ %rmw = atomicrmw or ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1, !amdgpu.no.fine.grained.memory !0
store i8 %rmw, ptr addrspace(1) %result
ret void
}
@@ -8645,7 +8433,7 @@ define amdgpu_kernel void @uniform_add_i8(ptr addrspace(1) %result, ptr addrspac
; GFX1232-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX1232-FAKE16-NEXT: buffer_store_b8 v0, off, s[0:3], null
; GFX1232-FAKE16-NEXT: s_endpgm
- %rmw = atomicrmw add ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1
+ %rmw = atomicrmw add ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1, !amdgpu.no.fine.grained.memory !0
store i8 %rmw, ptr addrspace(1) %result
ret void
}
@@ -9017,7 +8805,7 @@ define amdgpu_kernel void @uniform_xchg_i8(ptr addrspace(1) %result, ptr addrspa
; GFX1232-NEXT: s_mov_b32 s2, -1
; GFX1232-NEXT: buffer_store_b8 v0, off, s[0:3], null
; GFX1232-NEXT: s_endpgm
- %rmw = atomicrmw xchg ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1
+ %rmw = atomicrmw xchg ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1, !amdgpu.no.fine.grained.memory !0
store i8 %rmw, ptr addrspace(1) %result
ret void
}
@@ -9025,720 +8813,508 @@ define amdgpu_kernel void @uniform_xchg_i8(ptr addrspace(1) %result, ptr addrspa
define amdgpu_kernel void @uniform_or_i16(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i16 %val) {
; GFX7LESS-LABEL: uniform_or_i16:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9
-; GFX7LESS-NEXT: s_load_dword s12, s[4:5], 0xd
+; GFX7LESS-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7LESS-NEXT: s_load_dword s6, s[4:5], 0xd
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX7LESS-NEXT: ; implicit-def: $vgpr0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX7LESS-NEXT: s_cbranch_execz .LBB15_4
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX7LESS-NEXT: s_cbranch_execz .LBB15_2
; GFX7LESS-NEXT: ; %bb.1:
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_and_b32 s4, s10, -4
-; GFX7LESS-NEXT: s_mov_b32 s5, s11
-; GFX7LESS-NEXT: s_and_b32 s0, s10, 3
-; GFX7LESS-NEXT: s_and_b32 s1, s12, 0xffff
-; GFX7LESS-NEXT: s_load_dword s6, s[4:5], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], 0
-; GFX7LESS-NEXT: s_mov_b32 s7, 0xf000
-; GFX7LESS-NEXT: s_lshl_b32 s13, s0, 3
-; GFX7LESS-NEXT: s_lshl_b32 s14, s1, s13
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s6
-; GFX7LESS-NEXT: s_mov_b32 s6, -1
-; GFX7LESS-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, v1
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, v0
-; GFX7LESS-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX7LESS-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, v2
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX7LESS-NEXT: s_cbranch_execnz .LBB15_2
-; GFX7LESS-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX7LESS-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX7LESS-NEXT: v_bfe_u32 v0, v2, s13, 16
-; GFX7LESS-NEXT: .LBB15_4: ; %Flow
-; GFX7LESS-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_and_b32 s8, s2, -4
; GFX7LESS-NEXT: s_mov_b32 s11, 0xf000
+; GFX7LESS-NEXT: s_and_b32 s2, s2, 3
+; GFX7LESS-NEXT: s_lshl_b32 s2, s2, 3
+; GFX7LESS-NEXT: s_and_b32 s7, s6, 0xffff
+; GFX7LESS-NEXT: s_lshl_b32 s7, s7, s2
; GFX7LESS-NEXT: s_mov_b32 s10, -1
+; GFX7LESS-NEXT: s_mov_b32 s9, s3
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s7
+; GFX7LESS-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX7LESS-NEXT: v_bfe_u32 v0, v0, s2, 16
+; GFX7LESS-NEXT: .LBB15_2:
+; GFX7LESS-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s12
-; GFX7LESS-NEXT: v_readfirstlane_b32 s0, v0
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s6
+; GFX7LESS-NEXT: v_readfirstlane_b32 s4, v0
; GFX7LESS-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
-; GFX7LESS-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX7LESS-NEXT: buffer_store_short v0, off, s[8:11], 0
+; GFX7LESS-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX7LESS-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX7LESS-NEXT: s_endpgm
;
; GFX8-LABEL: uniform_or_i16:
; GFX8: ; %bb.0:
-; GFX8-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX8-NEXT: s_load_dword s12, s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX8-NEXT: ; implicit-def: $vgpr0
-; GFX8-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX8-NEXT: s_cbranch_execz .LBB15_4
+; GFX8-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX8-NEXT: s_cbranch_execz .LBB15_2
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_and_b32 s4, s10, -4
-; GFX8-NEXT: s_mov_b32 s5, s11
-; GFX8-NEXT: s_load_dword s1, s[4:5], 0x0
-; GFX8-NEXT: s_and_b32 s0, s10, 3
-; GFX8-NEXT: s_lshl_b32 s13, s0, 3
-; GFX8-NEXT: s_and_b32 s0, 0xffff, s12
-; GFX8-NEXT: s_lshl_b32 s14, s0, s13
-; GFX8-NEXT: s_mov_b64 s[10:11], 0
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
-; GFX8-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX8-NEXT: s_and_b32 s8, s2, -4
+; GFX8-NEXT: s_and_b32 s2, s2, 3
+; GFX8-NEXT: s_mov_b32 s9, s3
+; GFX8-NEXT: s_lshl_b32 s2, s2, 3
+; GFX8-NEXT: s_and_b32 s3, 0xffff, s6
+; GFX8-NEXT: s_lshl_b32 s3, s3, s2
+; GFX8-NEXT: s_mov_b32 s11, 0xf000
+; GFX8-NEXT: s_mov_b32 s10, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s3
+; GFX8-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX8-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX8-NEXT: s_cbranch_execnz .LBB15_2
-; GFX8-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX8-NEXT: .LBB15_4: ; %Flow
-; GFX8-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, s2, v0
+; GFX8-NEXT: .LBB15_2:
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s4, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s12
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX8-NEXT: s_mov_b32 s11, 0xf000
-; GFX8-NEXT: s_mov_b32 s10, -1
-; GFX8-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX8-NEXT: buffer_store_short v0, off, s[8:11], 0
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX8-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: uniform_or_i16:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-NEXT: s_load_dword s12, s[4:5], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-NEXT: ; implicit-def: $vgpr0
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX9-NEXT: s_cbranch_execz .LBB15_4
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX9-NEXT: s_cbranch_execz .LBB15_2
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_and_b32 s4, s10, -4
-; GFX9-NEXT: s_mov_b32 s5, s11
-; GFX9-NEXT: s_load_dword s1, s[4:5], 0x0
-; GFX9-NEXT: s_and_b32 s0, s10, 3
-; GFX9-NEXT: s_lshl_b32 s13, s0, 3
-; GFX9-NEXT: s_and_b32 s0, 0xffff, s12
-; GFX9-NEXT: s_lshl_b32 s14, s0, s13
-; GFX9-NEXT: s_mov_b64 s[10:11], 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, s1
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
-; GFX9-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX9-NEXT: v_mov_b32_e32 v3, v1
-; GFX9-NEXT: v_mov_b32_e32 v2, v0
-; GFX9-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX9-NEXT: s_and_b32 s8, s2, -4
+; GFX9-NEXT: s_and_b32 s2, s2, 3
+; GFX9-NEXT: s_mov_b32 s9, s3
+; GFX9-NEXT: s_lshl_b32 s2, s2, 3
+; GFX9-NEXT: s_and_b32 s3, 0xffff, s6
+; GFX9-NEXT: s_lshl_b32 s3, s3, s2
+; GFX9-NEXT: s_mov_b32 s11, 0xf000
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX9-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX9-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX9-NEXT: s_cbranch_execnz .LBB15_2
-; GFX9-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX9-NEXT: .LBB15_4: ; %Flow
-; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, s2, v0
+; GFX9-NEXT: .LBB15_2:
+; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s4, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX9-NEXT: s_mov_b32 s11, 0xf000
-; GFX9-NEXT: s_mov_b32 s10, -1
-; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX9-NEXT: buffer_store_short v0, off, s[8:11], 0
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_or_b32_e32 v0, s4, v0
+; GFX9-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: uniform_or_i16:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX1064-NEXT: s_load_dword s12, s[4:5], 0x34
+; GFX1064-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1064-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1064-NEXT: ; implicit-def: $vgpr0
-; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1064-NEXT: s_cbranch_execz .LBB15_4
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1064-NEXT: s_cbranch_execz .LBB15_2
; GFX1064-NEXT: ; %bb.1:
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_and_b32 s4, s10, -4
-; GFX1064-NEXT: s_mov_b32 s5, s11
-; GFX1064-NEXT: s_and_b32 s1, s10, 3
-; GFX1064-NEXT: s_load_dword s0, s[4:5], 0x0
-; GFX1064-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1064-NEXT: s_and_b32 s1, 0xffff, s12
-; GFX1064-NEXT: s_mov_b64 s[10:11], 0
-; GFX1064-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1064-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1064-NEXT: s_mov_b32 s6, -1
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
-; GFX1064-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1064-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1064-NEXT: v_mov_b32_e32 v3, v1
-; GFX1064-NEXT: v_mov_b32_e32 v2, v0
-; GFX1064-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX1064-NEXT: s_and_b32 s7, s2, 3
+; GFX1064-NEXT: s_and_b32 s8, 0xffff, s6
+; GFX1064-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1064-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1064-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1064-NEXT: s_and_b32 s8, s2, -4
+; GFX1064-NEXT: v_mov_b32_e32 v0, s9
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s9, s3
+; GFX1064-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1064-NEXT: v_mov_b32_e32 v1, v2
-; GFX1064-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GFX1064-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1064-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1064-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1064-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1064-NEXT: .LBB15_4: ; %Flow
-; GFX1064-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX1064-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1064-NEXT: .LBB15_2:
+; GFX1064-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1064-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1064-NEXT: s_mov_b32 s10, -1
-; GFX1064-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT: v_cndmask_b32_e64 v0, s12, 0, vcc
-; GFX1064-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1064-NEXT: buffer_store_short v0, off, s[8:11], 0
+; GFX1064-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1064-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1064-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc
+; GFX1064-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1064-NEXT: s_mov_b32 s2, -1
+; GFX1064-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: uniform_or_i16:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX1032-NEXT: s_load_dword s1, s[4:5], 0x34
+; GFX1032-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1032-NEXT: s_load_dword s6, s[4:5], 0x34
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s3, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1032-NEXT: ; implicit-def: $vgpr0
-; GFX1032-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1032-NEXT: s_cbranch_execz .LBB15_4
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1032-NEXT: s_cbranch_execz .LBB15_2
; GFX1032-NEXT: ; %bb.1:
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_and_b32 s4, s10, -4
-; GFX1032-NEXT: s_mov_b32 s5, s11
-; GFX1032-NEXT: s_and_b32 s6, s10, 3
-; GFX1032-NEXT: s_load_dword s0, s[4:5], 0x0
-; GFX1032-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1032-NEXT: s_and_b32 s6, 0xffff, s1
-; GFX1032-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1032-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1032-NEXT: s_mov_b32 s6, -1
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
-; GFX1032-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1032-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1032-NEXT: v_mov_b32_e32 v3, v1
-; GFX1032-NEXT: v_mov_b32_e32 v2, v0
-; GFX1032-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; GFX1032-NEXT: s_and_b32 s5, s2, 3
+; GFX1032-NEXT: s_and_b32 s7, 0xffff, s6
+; GFX1032-NEXT: s_lshl_b32 s5, s5, 3
+; GFX1032-NEXT: s_and_b32 s8, s2, -4
+; GFX1032-NEXT: s_lshl_b32 s7, s7, s5
+; GFX1032-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1032-NEXT: v_mov_b32_e32 v0, s7
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s9, s3
+; GFX1032-NEXT: buffer_atomic_or v0, off, s[8:11], 0 glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1032-NEXT: v_mov_b32_e32 v1, v2
-; GFX1032-NEXT: s_or_b32 s3, s0, s3
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s3
-; GFX1032-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1032-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1032-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1032-NEXT: .LBB15_4: ; %Flow
-; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1032-NEXT: v_lshrrev_b32_e32 v0, s5, v0
+; GFX1032-NEXT: .LBB15_2:
+; GFX1032-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1032-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1032-NEXT: s_mov_b32 s10, -1
-; GFX1032-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1032-NEXT: v_cndmask_b32_e64 v0, s1, 0, vcc_lo
-; GFX1032-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1032-NEXT: buffer_store_short v0, off, s[8:11], 0
+; GFX1032-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1032-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1032-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc_lo
+; GFX1032-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1032-NEXT: s_mov_b32 s2, -1
+; GFX1032-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX1032-NEXT: s_endpgm
;
; GFX1164-TRUE16-LABEL: uniform_or_i16:
; GFX1164-TRUE16: ; %bb.0:
; GFX1164-TRUE16-NEXT: s_clause 0x1
-; GFX1164-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1164-TRUE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1164-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1164-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-TRUE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1164-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1164-TRUE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1164-TRUE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1164-TRUE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1164-TRUE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1164-TRUE16-NEXT: ; %bb.1:
; GFX1164-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1164-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1164-TRUE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1164-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1164-TRUE16-NEXT: s_mov_b32 s6, s12
-; GFX1164-TRUE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1164-TRUE16-NEXT: s_and_b32 s1, 0xffff, s6
-; GFX1164-TRUE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1164-TRUE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1164-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1164-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1164-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-TRUE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1164-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1164-TRUE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1164-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1164-TRUE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1164-TRUE16-NEXT: s_mov_b32 s8, s6
+; GFX1164-TRUE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1164-TRUE16-NEXT: s_and_b32 s8, 0xffff, s8
+; GFX1164-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1164-TRUE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1164-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1164-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1164-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1164-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1164-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-TRUE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1164-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1164-TRUE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1164-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-TRUE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1164-TRUE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1164-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1164-TRUE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1164-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1164-TRUE16-NEXT: .LBB15_4: ; %Flow
-; GFX1164-TRUE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1164-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1164-TRUE16-NEXT: .LBB15_2:
+; GFX1164-TRUE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1164-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1164-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1164-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1164-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1164-TRUE16-NEXT: v_cndmask_b16 v0.l, s12, 0, vcc
+; GFX1164-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1164-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1164-TRUE16-NEXT: v_cndmask_b16 v0.l, s6, 0, vcc
; GFX1164-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1164-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], 0
+; GFX1164-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1164-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1164-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX1164-TRUE16-NEXT: s_endpgm
;
; GFX1164-FAKE16-LABEL: uniform_or_i16:
; GFX1164-FAKE16: ; %bb.0:
; GFX1164-FAKE16-NEXT: s_clause 0x1
-; GFX1164-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1164-FAKE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1164-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1164-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1164-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-FAKE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1164-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1164-FAKE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1164-FAKE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1164-FAKE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1164-FAKE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1164-FAKE16-NEXT: ; %bb.1:
; GFX1164-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1164-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1164-FAKE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1164-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1164-FAKE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1164-FAKE16-NEXT: s_and_b32 s1, 0xffff, s12
-; GFX1164-FAKE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1164-FAKE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1164-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1164-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1164-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-FAKE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1164-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1164-FAKE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1164-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1164-FAKE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1164-FAKE16-NEXT: s_and_b32 s8, 0xffff, s6
+; GFX1164-FAKE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1164-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1164-FAKE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1164-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1164-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1164-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1164-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1164-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-FAKE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1164-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1164-FAKE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1164-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-FAKE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1164-FAKE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1164-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1164-FAKE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1164-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1164-FAKE16-NEXT: .LBB15_4: ; %Flow
-; GFX1164-FAKE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1164-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1164-FAKE16-NEXT: .LBB15_2:
+; GFX1164-FAKE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1164-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1164-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1164-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1164-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1164-FAKE16-NEXT: v_cndmask_b32_e64 v0, s12, 0, vcc
+; GFX1164-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1164-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1164-FAKE16-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc
; GFX1164-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1164-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], 0
+; GFX1164-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1164-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1164-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX1164-FAKE16-NEXT: s_endpgm
;
; GFX1132-TRUE16-LABEL: uniform_or_i16:
; GFX1132-TRUE16: ; %bb.0:
; GFX1132-TRUE16-NEXT: s_clause 0x1
-; GFX1132-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1132-TRUE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1132-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1132-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-TRUE16-NEXT: s_mov_b32 s3, 0
; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1132-TRUE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1132-TRUE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1132-TRUE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1132-TRUE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1132-TRUE16-NEXT: ; %bb.1:
; GFX1132-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1132-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1132-TRUE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1132-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1132-TRUE16-NEXT: s_mov_b32 s7, s1
-; GFX1132-TRUE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1132-TRUE16-NEXT: s_and_b32 s6, 0xffff, s7
-; GFX1132-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1132-TRUE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1132-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1132-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1132-TRUE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1132-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-TRUE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1132-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1132-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1132-TRUE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1132-TRUE16-NEXT: s_mov_b32 s7, s4
+; GFX1132-TRUE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1132-TRUE16-NEXT: s_and_b32 s7, 0xffff, s7
+; GFX1132-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1132-TRUE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1132-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1132-TRUE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1132-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1132-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1132-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1132-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1132-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1132-TRUE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1132-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1132-TRUE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1132-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1132-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1132-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1132-TRUE16-NEXT: .LBB15_4: ; %Flow
-; GFX1132-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1132-TRUE16-NEXT: .LBB15_2:
+; GFX1132-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1132-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1132-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1132-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1132-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1132-TRUE16-NEXT: v_cndmask_b16 v0.l, s1, 0, vcc_lo
+; GFX1132-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1132-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1132-TRUE16-NEXT: v_cndmask_b16 v0.l, s4, 0, vcc_lo
; GFX1132-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1132-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], 0
+; GFX1132-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1132-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1132-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX1132-TRUE16-NEXT: s_endpgm
;
; GFX1132-FAKE16-LABEL: uniform_or_i16:
; GFX1132-FAKE16: ; %bb.0:
; GFX1132-FAKE16-NEXT: s_clause 0x1
-; GFX1132-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1132-FAKE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1132-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1132-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1132-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-FAKE16-NEXT: s_mov_b32 s3, 0
; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1132-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1132-FAKE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1132-FAKE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1132-FAKE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1132-FAKE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1132-FAKE16-NEXT: ; %bb.1:
; GFX1132-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1132-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1132-FAKE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1132-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1132-FAKE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1132-FAKE16-NEXT: s_and_b32 s6, 0xffff, s1
-; GFX1132-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1132-FAKE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1132-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1132-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1132-FAKE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1132-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-FAKE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1132-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1132-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], 0 glc
+; GFX1132-FAKE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1132-FAKE16-NEXT: s_and_b32 s7, 0xffff, s4
+; GFX1132-FAKE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1132-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1132-FAKE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1132-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1132-FAKE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1132-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1132-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1132-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], 0 glc
; GFX1132-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-FAKE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1132-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1132-FAKE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1132-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1132-FAKE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1132-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1132-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1132-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1132-FAKE16-NEXT: .LBB15_4: ; %Flow
-; GFX1132-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1132-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1132-FAKE16-NEXT: .LBB15_2:
+; GFX1132-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1132-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1132-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1132-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1132-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1132-FAKE16-NEXT: v_cndmask_b32_e64 v0, s1, 0, vcc_lo
+; GFX1132-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1132-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1132-FAKE16-NEXT: v_cndmask_b32_e64 v0, s4, 0, vcc_lo
; GFX1132-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1132-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], 0
+; GFX1132-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1132-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1132-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX1132-FAKE16-NEXT: s_endpgm
;
; GFX1264-TRUE16-LABEL: uniform_or_i16:
; GFX1264-TRUE16: ; %bb.0:
; GFX1264-TRUE16-NEXT: s_clause 0x1
-; GFX1264-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1264-TRUE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1264-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1264-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1264-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-TRUE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1264-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1264-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1264-TRUE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1264-TRUE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1264-TRUE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1264-TRUE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1264-TRUE16-NEXT: ; %bb.1:
; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1264-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1264-TRUE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1264-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1264-TRUE16-NEXT: s_mov_b32 s6, s12
-; GFX1264-TRUE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1264-TRUE16-NEXT: s_and_b32 s1, 0xffff, s6
-; GFX1264-TRUE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1264-TRUE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1264-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1264-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1264-TRUE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1264-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-TRUE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1264-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1264-TRUE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1264-TRUE16-NEXT: s_mov_b32 s8, s6
+; GFX1264-TRUE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1264-TRUE16-NEXT: s_and_b32 s8, 0xffff, s8
+; GFX1264-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1264-TRUE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1264-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1264-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1264-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1264-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1264-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1264-TRUE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1264-TRUE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1264-TRUE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1264-TRUE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1264-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1264-TRUE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1264-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1264-TRUE16-NEXT: .LBB15_4: ; %Flow
-; GFX1264-TRUE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1264-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1264-TRUE16-NEXT: .LBB15_2:
+; GFX1264-TRUE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1264-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1264-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1264-TRUE16-NEXT: v_cndmask_b16 v0.l, s12, 0, vcc
-; GFX1264-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX1264-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1264-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1264-TRUE16-NEXT: v_cndmask_b16 v0.l, s6, 0, vcc
; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1264-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1264-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1264-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1264-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1264-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1264-TRUE16-NEXT: s_endpgm
;
; GFX1264-FAKE16-LABEL: uniform_or_i16:
; GFX1264-FAKE16: ; %bb.0:
; GFX1264-FAKE16-NEXT: s_clause 0x1
-; GFX1264-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1264-FAKE16-NEXT: s_load_b32 s12, s[4:5], 0x34
+; GFX1264-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1264-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1264-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-FAKE16-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1264-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX1264-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1264-FAKE16-NEXT: s_and_saveexec_b64 s[2:3], vcc
-; GFX1264-FAKE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1264-FAKE16-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; GFX1264-FAKE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1264-FAKE16-NEXT: ; %bb.1:
; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1264-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1264-FAKE16-NEXT: s_and_b32 s1, s10, 3
-; GFX1264-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1264-FAKE16-NEXT: s_lshl_b32 s13, s1, 3
-; GFX1264-FAKE16-NEXT: s_and_b32 s1, 0xffff, s12
-; GFX1264-FAKE16-NEXT: s_mov_b64 s[10:11], 0
-; GFX1264-FAKE16-NEXT: s_lshl_b32 s14, s1, s13
-; GFX1264-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1264-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1264-FAKE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1264-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-FAKE16-NEXT: v_or_b32_e32 v0, s14, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1264-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1264-FAKE16-NEXT: s_and_b32 s7, s2, 3
+; GFX1264-FAKE16-NEXT: s_and_b32 s8, 0xffff, s6
+; GFX1264-FAKE16-NEXT: s_lshl_b32 s7, s7, 3
+; GFX1264-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1264-FAKE16-NEXT: s_lshl_b32 s9, s8, s7
+; GFX1264-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v0, s9
+; GFX1264-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1264-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1264-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1264-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1264-FAKE16-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1264-FAKE16-NEXT: s_or_b64 s[10:11], s[0:1], s[10:11]
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1264-FAKE16-NEXT: s_and_not1_b64 exec, exec, s[10:11]
-; GFX1264-FAKE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1264-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1264-FAKE16-NEXT: s_or_b64 exec, exec, s[10:11]
-; GFX1264-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s13, v2
-; GFX1264-FAKE16-NEXT: .LBB15_4: ; %Flow
-; GFX1264-FAKE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1264-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s7, v0
+; GFX1264-FAKE16-NEXT: .LBB15_2:
+; GFX1264-FAKE16-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1264-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1264-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1264-FAKE16-NEXT: v_cndmask_b32_e64 v0, s12, 0, vcc
-; GFX1264-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX1264-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1264-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1264-FAKE16-NEXT: v_cndmask_b32_e64 v0, s6, 0, vcc
; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1264-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1264-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1264-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1264-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1264-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1264-FAKE16-NEXT: s_endpgm
;
; GFX1232-TRUE16-LABEL: uniform_or_i16:
; GFX1232-TRUE16: ; %bb.0:
; GFX1232-TRUE16-NEXT: s_clause 0x1
-; GFX1232-TRUE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1232-TRUE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1232-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1232-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1232-TRUE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1232-TRUE16-NEXT: s_mov_b32 s3, 0
; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232-TRUE16-NEXT: ; implicit-def: $vgpr0_lo16
-; GFX1232-TRUE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1232-TRUE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1232-TRUE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1232-TRUE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1232-TRUE16-NEXT: ; %bb.1:
; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1232-TRUE16-NEXT: s_mov_b32 s5, s11
-; GFX1232-TRUE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1232-TRUE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1232-TRUE16-NEXT: s_mov_b32 s7, s1
-; GFX1232-TRUE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1232-TRUE16-NEXT: s_and_b32 s6, 0xffff, s7
-; GFX1232-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1232-TRUE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1232-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1232-TRUE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1232-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1232-TRUE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1232-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1232-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1232-TRUE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1232-TRUE16-NEXT: s_mov_b32 s7, s4
+; GFX1232-TRUE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1232-TRUE16-NEXT: s_and_b32 s7, 0xffff, s7
+; GFX1232-TRUE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1232-TRUE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1232-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1232-TRUE16-NEXT: s_mov_b32 s10, -1
+; GFX1232-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX1232-TRUE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1232-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1232-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1232-TRUE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1232-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1232-TRUE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1232-TRUE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1232-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1232-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1232-TRUE16-NEXT: .LBB15_4: ; %Flow
-; GFX1232-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1232-TRUE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1232-TRUE16-NEXT: .LBB15_2:
+; GFX1232-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1232-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-TRUE16-NEXT: s_mov_b32 s10, -1
-; GFX1232-TRUE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1232-TRUE16-NEXT: v_cndmask_b16 v0.l, s1, 0, vcc_lo
-; GFX1232-TRUE16-NEXT: s_wait_alu 0xf1ff
+; GFX1232-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1232-TRUE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1232-TRUE16-NEXT: v_cndmask_b16 v0.l, s4, 0, vcc_lo
; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1232-TRUE16-NEXT: v_or_b16 v0.l, s0, v0.l
-; GFX1232-TRUE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1232-TRUE16-NEXT: v_or_b16 v0.l, s2, v0.l
+; GFX1232-TRUE16-NEXT: s_mov_b32 s2, -1
+; GFX1232-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1232-TRUE16-NEXT: s_endpgm
;
; GFX1232-FAKE16-LABEL: uniform_or_i16:
; GFX1232-FAKE16: ; %bb.0:
; GFX1232-FAKE16-NEXT: s_clause 0x1
-; GFX1232-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1232-FAKE16-NEXT: s_load_b32 s1, s[4:5], 0x34
+; GFX1232-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1232-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX1232-FAKE16-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1232-FAKE16-NEXT: s_mov_b32 s3, 0
; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX1232-FAKE16-NEXT: ; implicit-def: $vgpr0
-; GFX1232-FAKE16-NEXT: s_and_saveexec_b32 s2, vcc_lo
-; GFX1232-FAKE16-NEXT: s_cbranch_execz .LBB15_4
+; GFX1232-FAKE16-NEXT: s_and_saveexec_b32 s5, vcc_lo
+; GFX1232-FAKE16-NEXT: s_cbranch_execz .LBB15_2
; GFX1232-FAKE16-NEXT: ; %bb.1:
; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: s_and_b32 s4, s10, -4
-; GFX1232-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1232-FAKE16-NEXT: s_and_b32 s6, s10, 3
-; GFX1232-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x0
-; GFX1232-FAKE16-NEXT: s_lshl_b32 s10, s6, 3
-; GFX1232-FAKE16-NEXT: s_and_b32 s6, 0xffff, s1
-; GFX1232-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1232-FAKE16-NEXT: s_lshl_b32 s11, s6, s10
-; GFX1232-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v1, s0
-; GFX1232-FAKE16-NEXT: .LBB15_2: ; %atomicrmw.start
-; GFX1232-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1232-FAKE16-NEXT: v_or_b32_e32 v0, s11, v1
-; GFX1232-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1232-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1232-FAKE16-NEXT: s_and_b32 s6, s2, 3
+; GFX1232-FAKE16-NEXT: s_and_b32 s7, 0xffff, s4
+; GFX1232-FAKE16-NEXT: s_lshl_b32 s6, s6, 3
+; GFX1232-FAKE16-NEXT: s_and_b32 s8, s2, -4
+; GFX1232-FAKE16-NEXT: s_lshl_b32 s7, s7, s6
+; GFX1232-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v0, s7
+; GFX1232-FAKE16-NEXT: s_mov_b32 s10, -1
+; GFX1232-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1232-FAKE16-NEXT: buffer_atomic_or_b32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1232-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1232-FAKE16-NEXT: v_cmp_eq_u32_e64 s0, v2, v1
-; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1232-FAKE16-NEXT: s_or_b32 s3, s0, s3
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1232-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s3
-; GFX1232-FAKE16-NEXT: s_cbranch_execnz .LBB15_2
-; GFX1232-FAKE16-NEXT: ; %bb.3: ; %atomicrmw.end
-; GFX1232-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s3
-; GFX1232-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s10, v2
-; GFX1232-FAKE16-NEXT: .LBB15_4: ; %Flow
-; GFX1232-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1232-FAKE16-NEXT: v_lshrrev_b32_e32 v0, s6, v0
+; GFX1232-FAKE16-NEXT: .LBB15_2:
+; GFX1232-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; GFX1232-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1232-FAKE16-NEXT: v_readfirstlane_b32 s0, v0
-; GFX1232-FAKE16-NEXT: v_cndmask_b32_e64 v0, s1, 0, vcc_lo
-; GFX1232-FAKE16-NEXT: s_wait_alu 0xf1ff
+; GFX1232-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1232-FAKE16-NEXT: v_readfirstlane_b32 s2, v0
+; GFX1232-FAKE16-NEXT: v_cndmask_b32_e64 v0, s4, 0, vcc_lo
; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1232-FAKE16-NEXT: v_or_b32_e32 v0, s0, v0
-; GFX1232-FAKE16-NEXT: buffer_store_b16 v0, off, s[8:11], null
+; GFX1232-FAKE16-NEXT: v_or_b32_e32 v0, s2, v0
+; GFX1232-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1232-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1232-FAKE16-NEXT: s_endpgm
- %rmw = atomicrmw or ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2
+ %rmw = atomicrmw or ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2, !amdgpu.no.fine.grained.memory !0
store i16 %rmw, ptr addrspace(1) %result
ret void
}
@@ -10555,7 +10131,7 @@ define amdgpu_kernel void @uniform_add_i16(ptr addrspace(1) %result, ptr addrspa
; GFX1232-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX1232-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1232-FAKE16-NEXT: s_endpgm
- %rmw = atomicrmw add ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2
+ %rmw = atomicrmw add ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2, !amdgpu.no.fine.grained.memory !0
store i16 %rmw, ptr addrspace(1) %result
ret void
}
@@ -10927,7 +10503,7 @@ define amdgpu_kernel void @uniform_xchg_i16(ptr addrspace(1) %result, ptr addrsp
; GFX1232-NEXT: s_mov_b32 s2, -1
; GFX1232-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1232-NEXT: s_endpgm
- %rmw = atomicrmw xchg ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2
+ %rmw = atomicrmw xchg ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2, !amdgpu.no.fine.grained.memory !0
store i16 %rmw, ptr addrspace(1) %result
ret void
}
@@ -11514,7 +11090,7 @@ define amdgpu_kernel void @uniform_fadd_f16(ptr addrspace(1) %result, ptr addrsp
; GFX1232-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX1232-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1232-FAKE16-NEXT: s_endpgm
- %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, half %val monotonic, align 2
+ %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, half %val monotonic, align 2, !amdgpu.no.fine.grained.memory !0
store half %rmw, ptr addrspace(1) %result
ret void
}
@@ -12204,7 +11780,7 @@ define amdgpu_kernel void @uniform_fadd_bf16(ptr addrspace(1) %result, ptr addrs
; GFX1232-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX1232-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
; GFX1232-FAKE16-NEXT: s_endpgm
- %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, bfloat %val monotonic, align 2
+ %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, bfloat %val monotonic, align 2, !amdgpu.no.fine.grained.memory !0
store bfloat %rmw, ptr addrspace(1) %result
ret void
}
@@ -12475,72 +12051,32 @@ define amdgpu_kernel void @uniform_fadd_v2f16(ptr addrspace(1) %result, ptr addr
; GFX1264-LABEL: uniform_fadd_v2f16:
; GFX1264: ; %bb.0:
; GFX1264-NEXT: s_clause 0x1
+; GFX1264-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1264-NEXT: s_load_b32 s10, s[4:5], 0x34
-; GFX1264-NEXT: s_mov_b64 s[8:9], 0
-; GFX1264-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1264-NEXT: s_mov_b32 s6, -1
-; GFX1264-NEXT: s_wait_kmcnt 0x0
-; GFX1264-NEXT: s_load_b32 s4, s[2:3], 0x0
-; GFX1264-NEXT: s_mov_b32 s5, s3
+; GFX1264-NEXT: v_mov_b32_e32 v0, 0
; GFX1264-NEXT: s_wait_kmcnt 0x0
-; GFX1264-NEXT: v_mov_b32_e32 v1, s4
-; GFX1264-NEXT: s_mov_b32 s4, s2
-; GFX1264-NEXT: .LBB20_1: ; %atomicrmw.start
-; GFX1264-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-NEXT: v_pk_add_f16 v0, v1, s10
-; GFX1264-NEXT: v_mov_b32_e32 v3, v1
-; GFX1264-NEXT: v_mov_b32_e32 v2, v0
-; GFX1264-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1264-NEXT: s_wait_loadcnt 0x0
-; GFX1264-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
-; GFX1264-NEXT: v_mov_b32_e32 v1, v2
-; GFX1264-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX1264-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1264-NEXT: s_and_not1_b64 exec, exec, s[8:9]
-; GFX1264-NEXT: s_cbranch_execnz .LBB20_1
-; GFX1264-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1264-NEXT: s_or_b64 exec, exec, s[8:9]
+; GFX1264-NEXT: v_mov_b32_e32 v1, s6
+; GFX1264-NEXT: global_atomic_pk_add_f16 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1264-NEXT: s_mov_b32 s3, 0x31016000
; GFX1264-NEXT: s_mov_b32 s2, -1
-; GFX1264-NEXT: buffer_store_b32 v2, off, s[0:3], null
+; GFX1264-NEXT: s_wait_loadcnt 0x0
+; GFX1264-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1264-NEXT: s_endpgm
;
; GFX1232-LABEL: uniform_fadd_v2f16:
; GFX1232: ; %bb.0:
; GFX1232-NEXT: s_clause 0x1
+; GFX1232-NEXT: s_load_b32 s6, s[4:5], 0x34
; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1232-NEXT: s_load_b32 s8, s[4:5], 0x34
-; GFX1232-NEXT: s_mov_b32 s9, 0
-; GFX1232-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1232-NEXT: s_mov_b32 s6, -1
-; GFX1232-NEXT: s_wait_kmcnt 0x0
-; GFX1232-NEXT: s_load_b32 s4, s[2:3], 0x0
-; GFX1232-NEXT: s_mov_b32 s5, s3
; GFX1232-NEXT: s_wait_kmcnt 0x0
-; GFX1232-NEXT: v_mov_b32_e32 v1, s4
-; GFX1232-NEXT: s_mov_b32 s4, s2
-; GFX1232-NEXT: .LBB20_1: ; %atomicrmw.start
-; GFX1232-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1232-NEXT: v_pk_add_f16 v0, v1, s8
-; GFX1232-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1232-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1232-NEXT: s_wait_loadcnt 0x0
-; GFX1232-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX1232-NEXT: v_mov_b32_e32 v1, v2
-; GFX1232-NEXT: s_or_b32 s9, vcc_lo, s9
-; GFX1232-NEXT: s_wait_alu 0xfffe
-; GFX1232-NEXT: s_and_not1_b32 exec_lo, exec_lo, s9
-; GFX1232-NEXT: s_cbranch_execnz .LBB20_1
-; GFX1232-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s9
+; GFX1232-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s6
+; GFX1232-NEXT: global_atomic_pk_add_f16 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1232-NEXT: s_mov_b32 s3, 0x31016000
; GFX1232-NEXT: s_mov_b32 s2, -1
-; GFX1232-NEXT: buffer_store_b32 v2, off, s[0:3], null
+; GFX1232-NEXT: s_wait_loadcnt 0x0
+; GFX1232-NEXT: buffer_store_b32 v0, off, s[0:3], null
; GFX1232-NEXT: s_endpgm
- %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, <2 x half> %val monotonic, align 4
+ %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, <2 x half> %val monotonic, align 4, !amdgpu.no.fine.grained.memory !0
store <2 x half> %rmw, ptr addrspace(1) %result
ret void
}
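[The same annotation drives the FP case above: on GFX12 the <2 x half> fadd now selects global_atomic_pk_add_f16 instead of the cmpxchg loop. A hedged sketch with illustrative names, again assuming !0 = !{} as in the tests:]

; Sketch: annotated packed-half fadd eligible for direct selection to
; global_atomic_pk_add_f16 on subtargets that support it.
define <2 x half> @annotated_pk_fadd(ptr addrspace(1) %p, <2 x half> %v) {
  %old = atomicrmw fadd ptr addrspace(1) %p, <2 x half> %v monotonic, align 4, !amdgpu.no.fine.grained.memory !0
  ret <2 x half> %old
}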
@@ -13035,235 +12571,41 @@ define amdgpu_kernel void @uniform_fadd_v2bf16(ptr addrspace(1) %result, ptr add
; GFX1132-FAKE16-NEXT: buffer_store_b32 v2, off, s[8:11], 0
; GFX1132-FAKE16-NEXT: s_endpgm
;
-; GFX1264-TRUE16-LABEL: uniform_fadd_v2bf16:
-; GFX1264-TRUE16: ; %bb.0:
-; GFX1264-TRUE16-NEXT: s_clause 0x1
-; GFX1264-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1264-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
-; GFX1264-TRUE16-NEXT: s_mov_b64 s[8:9], 0
-; GFX1264-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1264-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: s_load_b32 s5, s[2:3], 0x0
-; GFX1264-TRUE16-NEXT: s_and_b32 s10, s4, 0xffff0000
-; GFX1264-TRUE16-NEXT: s_lshl_b32 s11, s4, 16
-; GFX1264-TRUE16-NEXT: s_mov_b32 s4, s2
-; GFX1264-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v1, s5
-; GFX1264-TRUE16-NEXT: s_mov_b32 s5, s3
-; GFX1264-TRUE16-NEXT: .LBB21_1: ; %atomicrmw.start
-; GFX1264-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX1264-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX1264-TRUE16-NEXT: v_add_f32_e32 v0, s11, v0
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1264-TRUE16-NEXT: v_add_f32_e32 v2, s10, v2
-; GFX1264-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1264-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX1264-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX1264-TRUE16-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX1264-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX1264-TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX1264-TRUE16-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
-; GFX1264-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc
-; GFX1264-TRUE16-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX1264-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX1264-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX1264-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v2
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1264-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1264-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1264-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
-; GFX1264-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1264-TRUE16-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX1264-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1264-TRUE16-NEXT: s_and_not1_b64 exec, exec, s[8:9]
-; GFX1264-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
-; GFX1264-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1264-TRUE16-NEXT: s_or_b64 exec, exec, s[8:9]
-; GFX1264-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1264-TRUE16-NEXT: s_mov_b32 s2, -1
-; GFX1264-TRUE16-NEXT: buffer_store_b32 v2, off, s[0:3], null
-; GFX1264-TRUE16-NEXT: s_endpgm
-;
-; GFX1264-FAKE16-LABEL: uniform_fadd_v2bf16:
-; GFX1264-FAKE16: ; %bb.0:
-; GFX1264-FAKE16-NEXT: s_clause 0x1
-; GFX1264-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1264-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x34
-; GFX1264-FAKE16-NEXT: s_mov_b64 s[2:3], 0
-; GFX1264-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1264-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: s_load_b32 s1, s[10:11], 0x0
-; GFX1264-FAKE16-NEXT: s_lshl_b32 s12, s0, 16
-; GFX1264-FAKE16-NEXT: s_and_b32 s13, s0, 0xffff0000
-; GFX1264-FAKE16-NEXT: s_mov_b32 s4, s10
-; GFX1264-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1264-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v1, s1
-; GFX1264-FAKE16-NEXT: .LBB21_1: ; %atomicrmw.start
-; GFX1264-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX1264-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX1264-FAKE16-NEXT: v_add_f32_e32 v0, s12, v0
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1264-FAKE16-NEXT: v_add_f32_e32 v2, s13, v2
-; GFX1264-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1264-FAKE16-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX1264-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX1264-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX1264-FAKE16-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX1264-FAKE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX1264-FAKE16-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
-; GFX1264-FAKE16-NEXT: v_cmp_u_f32_e64 s[0:1], v0, v0
-; GFX1264-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX1264-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
-; GFX1264-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX1264-FAKE16-NEXT: v_cndmask_b32_e64 v0, v3, v5, s[0:1]
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1264-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x7060302
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v3, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v2, v0
-; GFX1264-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1264-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1264-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
-; GFX1264-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1264-FAKE16-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX1264-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1264-FAKE16-NEXT: s_and_not1_b64 exec, exec, s[2:3]
-; GFX1264-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
-; GFX1264-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1264-FAKE16-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX1264-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1264-FAKE16-NEXT: buffer_store_b32 v2, off, s[8:11], null
-; GFX1264-FAKE16-NEXT: s_endpgm
-;
-; GFX1232-TRUE16-LABEL: uniform_fadd_v2bf16:
-; GFX1232-TRUE16: ; %bb.0:
-; GFX1232-TRUE16-NEXT: s_clause 0x1
-; GFX1232-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1232-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
-; GFX1232-TRUE16-NEXT: s_mov_b32 s8, 0
-; GFX1232-TRUE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1232-TRUE16-NEXT: s_mov_b32 s6, -1
-; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: s_load_b32 s5, s[2:3], 0x0
-; GFX1232-TRUE16-NEXT: s_and_b32 s9, s4, 0xffff0000
-; GFX1232-TRUE16-NEXT: s_lshl_b32 s10, s4, 16
-; GFX1232-TRUE16-NEXT: s_mov_b32 s4, s2
-; GFX1232-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v1, s5
-; GFX1232-TRUE16-NEXT: s_mov_b32 s5, s3
-; GFX1232-TRUE16-NEXT: .LBB21_1: ; %atomicrmw.start
-; GFX1232-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1232-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX1232-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX1232-TRUE16-NEXT: v_add_f32_e32 v0, s10, v0
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1232-TRUE16-NEXT: v_add_f32_e32 v2, s9, v2
-; GFX1232-TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1232-TRUE16-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX1232-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX1232-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX1232-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX1232-TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX1232-TRUE16-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
-; GFX1232-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1232-TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX1232-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX1232-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX1232-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX1232-TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc_lo
-; GFX1232-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1232-TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v2
-; GFX1232-TRUE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1232-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1232-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX1232-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX1232-TRUE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1232-TRUE16-NEXT: s_or_b32 s8, vcc_lo, s8
-; GFX1232-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX1232-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s8
-; GFX1232-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
-; GFX1232-TRUE16-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1232-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; GFX1232-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1232-TRUE16-NEXT: s_mov_b32 s2, -1
-; GFX1232-TRUE16-NEXT: buffer_store_b32 v2, off, s[0:3], null
-; GFX1232-TRUE16-NEXT: s_endpgm
+; GFX1264-LABEL: uniform_fadd_v2bf16:
+; GFX1264: ; %bb.0:
+; GFX1264-NEXT: s_clause 0x1
+; GFX1264-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1264-NEXT: v_mov_b32_e32 v0, 0
+; GFX1264-NEXT: s_wait_kmcnt 0x0
+; GFX1264-NEXT: v_mov_b32_e32 v1, s6
+; GFX1264-NEXT: global_atomic_pk_add_bf16 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1264-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1264-NEXT: s_mov_b32 s2, -1
+; GFX1264-NEXT: s_wait_loadcnt 0x0
+; GFX1264-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1264-NEXT: s_endpgm
;
-; GFX1232-FAKE16-LABEL: uniform_fadd_v2bf16:
-; GFX1232-FAKE16: ; %bb.0:
-; GFX1232-FAKE16-NEXT: s_clause 0x1
-; GFX1232-FAKE16-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1232-FAKE16-NEXT: s_load_b32 s0, s[4:5], 0x34
-; GFX1232-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX1232-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
-; GFX1232-FAKE16-NEXT: s_mov_b32 s6, -1
-; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: s_load_b32 s4, s[10:11], 0x0
-; GFX1232-FAKE16-NEXT: s_lshl_b32 s2, s0, 16
-; GFX1232-FAKE16-NEXT: s_and_b32 s3, s0, 0xffff0000
-; GFX1232-FAKE16-NEXT: s_mov_b32 s5, s11
-; GFX1232-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v1, s4
-; GFX1232-FAKE16-NEXT: s_mov_b32 s4, s10
-; GFX1232-FAKE16-NEXT: .LBB21_1: ; %atomicrmw.start
-; GFX1232-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1232-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v1
-; GFX1232-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX1232-FAKE16-NEXT: v_add_f32_e32 v0, s2, v0
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1232-FAKE16-NEXT: v_add_f32_e32 v2, s3, v2
-; GFX1232-FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1232-FAKE16-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX1232-FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX1232-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX1232-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX1232-FAKE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX1232-FAKE16-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
-; GFX1232-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v0, v0
-; GFX1232-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX1232-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc_lo
-; GFX1232-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX1232-FAKE16-NEXT: v_cndmask_b32_e64 v0, v3, v5, s0
-; GFX1232-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1232-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x7060302
-; GFX1232-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX1232-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[2:3], off, s[4:7], null th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX1232-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX1232-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX1232-FAKE16-NEXT: v_mov_b32_e32 v1, v2
-; GFX1232-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
-; GFX1232-FAKE16-NEXT: s_wait_alu 0xfffe
-; GFX1232-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
-; GFX1232-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
-; GFX1232-FAKE16-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1232-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX1232-FAKE16-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-FAKE16-NEXT: s_mov_b32 s10, -1
-; GFX1232-FAKE16-NEXT: buffer_store_b32 v2, off, s[8:11], null
-; GFX1232-FAKE16-NEXT: s_endpgm
- %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, <2 x bfloat> %val monotonic, align 4
+; GFX1232-LABEL: uniform_fadd_v2bf16:
+; GFX1232: ; %bb.0:
+; GFX1232-NEXT: s_clause 0x1
+; GFX1232-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1232-NEXT: s_wait_kmcnt 0x0
+; GFX1232-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s6
+; GFX1232-NEXT: global_atomic_pk_add_bf16 v0, v0, v1, s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1232-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1232-NEXT: s_mov_b32 s2, -1
+; GFX1232-NEXT: s_wait_loadcnt 0x0
+; GFX1232-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1232-NEXT: s_endpgm
+ %rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, <2 x bfloat> %val monotonic, align 4, !amdgpu.no.fine.grained.memory !0
store <2 x bfloat> %rmw, ptr addrspace(1) %result
ret void
}
+
+!0 = !{}
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX1132_DPP-FAKE16: {{.*}}
; GFX1132_DPP-TRUE16: {{.*}}
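For reference, a minimal standalone sketch of the IR form the test above now checks (the kernel name is illustrative, not taken from the suite); the !amdgpu.no.fine.grained.memory tag is what lets the single global_atomic_pk_add_bf16 replace the compare-exchange loop:

define amdgpu_kernel void @sketch_fadd_v2bf16(ptr addrspace(1) %result, ptr addrspace(1) %ptr, <2 x bfloat> %val) {
  ; The metadata asserts %ptr never refers to fine-grained host-coherent
  ; memory, so the native packed-bf16 atomic is legal at system scope.
  %rmw = atomicrmw fadd ptr addrspace(1) %ptr, <2 x bfloat> %val monotonic, align 4, !amdgpu.no.fine.grained.memory !0
  store <2 x bfloat> %rmw, ptr addrspace(1) %result
  ret void
}

!0 = !{}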
diff --git a/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll b/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
index 9a98a7cd01ed4..12de3750640db 100644
--- a/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
+++ b/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
@@ -42,7 +42,7 @@ define protected amdgpu_kernel void @sub(ptr addrspace(1) %p, ptr addrspace(1) %
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw sub ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw sub ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -64,7 +64,7 @@ define protected amdgpu_kernel void @and(ptr addrspace(1) %p, ptr addrspace(1) %
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw and ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw and ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -86,7 +86,7 @@ define protected amdgpu_kernel void @or(ptr addrspace(1) %p, ptr addrspace(1) %q
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw or ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw or ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -108,7 +108,7 @@ define protected amdgpu_kernel void @xor(ptr addrspace(1) %p, ptr addrspace(1) %
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw xor ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw xor ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -144,7 +144,7 @@ define protected amdgpu_kernel void @nand(ptr addrspace(1) %p, ptr addrspace(1)
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw nand ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw nand ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -166,7 +166,7 @@ define protected amdgpu_kernel void @max_workgroup(ptr addrspace(1) %p, ptr addr
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw max ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic
+ %n32 = atomicrmw max ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -188,7 +188,7 @@ define protected amdgpu_kernel void @max(ptr addrspace(1) %p, ptr addrspace(1) %
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw max ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw max ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -210,7 +210,7 @@ define protected amdgpu_kernel void @min_workgroup(ptr addrspace(1) %p, ptr addr
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw min ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic
+ %n32 = atomicrmw min ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -232,7 +232,7 @@ define protected amdgpu_kernel void @min(ptr addrspace(1) %p, ptr addrspace(1) %
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw min ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw min ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -254,7 +254,7 @@ define protected amdgpu_kernel void @umax_workgroup(ptr addrspace(1) %p, ptr add
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw umax ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic
+ %n32 = atomicrmw umax ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -276,7 +276,7 @@ define protected amdgpu_kernel void @umax(ptr addrspace(1) %p, ptr addrspace(1)
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw umax ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw umax ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -298,7 +298,7 @@ define protected amdgpu_kernel void @umin_workgroup(ptr addrspace(1) %p, ptr add
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw umin ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic
+ %n32 = atomicrmw umin ptr addrspace(1) %p, i32 1 syncscope("workgroup") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -320,7 +320,7 @@ define protected amdgpu_kernel void @umin(ptr addrspace(1) %p, ptr addrspace(1)
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw umin ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw umin ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -388,7 +388,7 @@ define protected amdgpu_kernel void @inc(ptr addrspace(1) %p, ptr addrspace(1) %
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw uinc_wrap ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw uinc_wrap ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -410,7 +410,7 @@ define protected amdgpu_kernel void @dec(ptr addrspace(1) %p, ptr addrspace(1) %
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %n32 = atomicrmw udec_wrap ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic
+ %n32 = atomicrmw udec_wrap ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
store float 1.0, ptr addrspace(1) %p1
@@ -446,7 +446,7 @@ define protected amdgpu_kernel void @fadd(ptr addrspace(1) %p, ptr addrspace(1)
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %f32 = atomicrmw fadd ptr addrspace(1) %p, float 1.0 syncscope("agent") monotonic
+ %f32 = atomicrmw fadd ptr addrspace(1) %p, float 1.0 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n32 = fptoui float %f32 to i32
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
@@ -483,7 +483,7 @@ define protected amdgpu_kernel void @fsub(ptr addrspace(1) %p, ptr addrspace(1)
; CHECK-NEXT: v_mov_b32_e32 v2, 1.0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_endpgm
- %f32 = atomicrmw fsub ptr addrspace(1) %p, float 1.0 syncscope("agent") monotonic
+ %f32 = atomicrmw fsub ptr addrspace(1) %p, float 1.0 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%n32 = fptoui float %f32 to i32
%n64 = zext i32 %n32 to i64
%p1 = getelementptr inbounds %S, ptr addrspace(1) %q, i64 %n64, i32 0
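The integer variants in dag-divergence-atomic.ll all follow the same shape; a standalone sketch (the struct-typed GEP of the real tests is simplified to a float GEP here, and the function name is illustrative):

define protected amdgpu_kernel void @sketch_sub_divergence(ptr addrspace(1) %p, ptr addrspace(1) %q) {
  ; The atomic's result feeds an address computation, so its divergence
  ; must be modeled; with the metadata the op stays a single instruction.
  %n32 = atomicrmw sub ptr addrspace(1) %p, i32 1 syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
  %n64 = zext i32 %n32 to i64
  %p1 = getelementptr inbounds float, ptr addrspace(1) %q, i64 %n64
  store float 1.0, ptr addrspace(1) %p1
  ret void
}

!0 = !{}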
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
index e674b57aae3ef..e6fcf49a27749 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
@@ -612,7 +612,7 @@ define amdgpu_kernel void @atomic_and_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -670,7 +670,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_offset(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -733,7 +733,7 @@ define amdgpu_kernel void @atomic_and_i32_addr64_offset(ptr %out, i32 %in, i64 %
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -804,7 +804,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -849,7 +849,7 @@ define amdgpu_kernel void @atomic_and_i32(ptr %out, i32 %in) {
; GCN3-NEXT: buffer_wbinvl1_vol
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile and ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -902,7 +902,7 @@ define amdgpu_kernel void @atomic_and_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile and ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -960,7 +960,7 @@ define amdgpu_kernel void @atomic_and_i32_addr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile and ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1026,7 +1026,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile and ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1076,7 +1076,7 @@ define amdgpu_kernel void @atomic_sub_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1134,7 +1134,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_offset(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1197,7 +1197,7 @@ define amdgpu_kernel void @atomic_sub_i32_addr64_offset(ptr %out, i32 %in, i64 %
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1268,7 +1268,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1313,7 +1313,7 @@ define amdgpu_kernel void @atomic_sub_i32(ptr %out, i32 %in) {
; GCN3-NEXT: buffer_wbinvl1_vol
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile sub ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1366,7 +1366,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile sub ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1424,7 +1424,7 @@ define amdgpu_kernel void @atomic_sub_i32_addr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile sub ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1490,7 +1490,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile sub ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1537,7 +1537,7 @@ define amdgpu_kernel void @atomic_max_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1595,7 +1595,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_offset(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1655,7 +1655,7 @@ define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr %out, i32 %in, i64 %
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1726,7 +1726,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1768,7 +1768,7 @@ define amdgpu_kernel void @atomic_max_i32(ptr %out, i32 %in) {
; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1821,7 +1821,7 @@ define amdgpu_kernel void @atomic_max_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1876,7 +1876,7 @@ define amdgpu_kernel void @atomic_max_i32_addr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile max ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1942,7 +1942,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile max ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -1989,7 +1989,7 @@ define amdgpu_kernel void @atomic_umax_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2047,7 +2047,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_offset(ptr %out, ptr %out2, i32 %
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2107,7 +2107,7 @@ define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr %out, i32 %in, i64
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2178,7 +2178,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr %out, ptr %out2
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2220,7 +2220,7 @@ define amdgpu_kernel void @atomic_umax_i32(ptr %out, i32 %in) {
; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2273,7 +2273,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2328,7 +2328,7 @@ define amdgpu_kernel void @atomic_umax_i32_addr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile umax ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2394,7 +2394,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr %out, ptr %out2, i32 %
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile umax ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2441,7 +2441,7 @@ define amdgpu_kernel void @atomic_min_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2499,7 +2499,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_offset(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2559,7 +2559,7 @@ define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr %out, i32 %in, i64 %
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2630,7 +2630,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2672,7 +2672,7 @@ define amdgpu_kernel void @atomic_min_i32(ptr %out, i32 %in) {
; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2725,7 +2725,7 @@ define amdgpu_kernel void @atomic_min_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2780,7 +2780,7 @@ define amdgpu_kernel void @atomic_min_i32_addr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile min ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2846,7 +2846,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile min ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -2893,7 +2893,7 @@ define amdgpu_kernel void @atomic_umin_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2951,7 +2951,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_offset(ptr %out, ptr %out2, i32 %
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -3011,7 +3011,7 @@ define amdgpu_kernel void @atomic_umin_i32_addr64_offset(ptr %out, i32 %in, i64
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3082,7 +3082,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(ptr %out, ptr %out2
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -3124,7 +3124,7 @@ define amdgpu_kernel void @atomic_umin_i32(ptr %out, i32 %in) {
; GCN3-NEXT: s_waitcnt lgkmcnt(0)
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3177,7 +3177,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin ptr %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -3232,7 +3232,7 @@ define amdgpu_kernel void @atomic_umin_i32_addr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile umin ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3298,7 +3298,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_addr64(ptr %out, ptr %out2, i32 %
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile umin ptr %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -3348,7 +3348,7 @@ define amdgpu_kernel void @atomic_or_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3406,7 +3406,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_offset(ptr %out, ptr %out2, i32 %in
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -3469,7 +3469,7 @@ define amdgpu_kernel void @atomic_or_i32_addr64_offset(ptr %out, i32 %in, i64 %i
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3540,7 +3540,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -3585,7 +3585,7 @@ define amdgpu_kernel void @atomic_or_i32(ptr %out, i32 %in) {
; GCN3-NEXT: buffer_wbinvl1_vol
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile or ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3638,7 +3638,7 @@ define amdgpu_kernel void @atomic_or_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile or ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -3696,7 +3696,7 @@ define amdgpu_kernel void @atomic_or_i32_addr64(ptr %out, i32 %in, i64 %index) {
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile or ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3762,7 +3762,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_addr64(ptr %out, ptr %out2, i32 %in
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile or ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -4327,7 +4327,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i32_offset(ptr %out, i32 %in, i32 %old
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
+ %val = cmpxchg volatile ptr %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -4825,7 +4825,7 @@ define amdgpu_kernel void @atomic_xor_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -4883,7 +4883,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_offset(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -4946,7 +4946,7 @@ define amdgpu_kernel void @atomic_xor_i32_addr64_offset(ptr %out, i32 %in, i64 %
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -5017,7 +5017,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -5062,7 +5062,7 @@ define amdgpu_kernel void @atomic_xor_i32(ptr %out, i32 %in) {
; GCN3-NEXT: buffer_wbinvl1_vol
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile xor ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -5115,7 +5115,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile xor ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -5173,7 +5173,7 @@ define amdgpu_kernel void @atomic_xor_i32_addr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile xor ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -5239,7 +5239,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_addr64(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile xor ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -6861,7 +6861,7 @@ define amdgpu_kernel void @atomic_inc_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -6910,7 +6910,7 @@ define amdgpu_kernel void @atomic_inc_i32_max_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 1023
- %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -6961,7 +6961,7 @@ define amdgpu_kernel void @atomic_inc_i32_max_offset_p1(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 1024
- %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7019,7 +7019,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret_offset(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -7082,7 +7082,7 @@ define amdgpu_kernel void @atomic_inc_i32_incr64_offset(ptr %out, i32 %in, i64 %
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7153,7 +7153,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret_incr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -7198,7 +7198,7 @@ define amdgpu_kernel void @atomic_inc_i32(ptr %out, i32 %in) {
; GCN3-NEXT: buffer_wbinvl1_vol
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile uinc_wrap ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7251,7 +7251,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile uinc_wrap ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -7309,7 +7309,7 @@ define amdgpu_kernel void @atomic_inc_i32_incr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile uinc_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7375,7 +7375,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret_incr64(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile uinc_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -7425,7 +7425,7 @@ define amdgpu_kernel void @atomic_dec_i32_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7474,7 +7474,7 @@ define amdgpu_kernel void @atomic_dec_i32_max_offset(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 1023
- %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7525,7 +7525,7 @@ define amdgpu_kernel void @atomic_dec_i32_max_offset_p1(ptr %out, i32 %in) {
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 1024
- %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7583,7 +7583,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret_offset(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr %out, i32 4
- %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -7646,7 +7646,7 @@ define amdgpu_kernel void @atomic_dec_i32_decr64_offset(ptr %out, i32 %in, i64 %
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7717,7 +7717,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret_decr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
%gep = getelementptr i32, ptr %ptr, i32 4
- %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -7762,7 +7762,7 @@ define amdgpu_kernel void @atomic_dec_i32(ptr %out, i32 %in) {
; GCN3-NEXT: buffer_wbinvl1_vol
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile udec_wrap ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7815,7 +7815,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret(ptr %out, ptr %out2, i32 %in) {
; GCN3-NEXT: flat_store_dword v[0:1], v2
; GCN3-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile udec_wrap ptr %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -7873,7 +7873,7 @@ define amdgpu_kernel void @atomic_dec_i32_decr64(ptr %out, i32 %in, i64 %index)
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile udec_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7939,7 +7939,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret_decr64(ptr %out, ptr %out2, i32 %i
; GCN3-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr %out, i64 %index
- %val = atomicrmw volatile udec_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %val, ptr %out2
ret void
}
@@ -8141,3 +8141,5 @@ define amdgpu_kernel void @atomic_load_bf16(ptr %in, ptr %out) {
store bfloat %val, ptr %out
ret void
}
+
+!0 = !{}
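The flat_atomics_i32_system.ll changes below exercise !amdgpu.no.remote.memory instead; a minimal sketch of that variant (names illustrative):

define i32 @sketch_flat_sub_no_remote(ptr %out, i32 %in) {
  ; The metadata asserts the address is never resident on another device,
  ; so a native flat_atomic_sub replaces the cmpxchg loop deleted in the
  ; checks that follow.
  %gep = getelementptr i32, ptr %out, i64 4
  %result = atomicrmw sub ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}

!0 = !{}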
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
index d587c97f4ed7a..e74ad3d62bea4 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i32_system.ll
@@ -1655,22 +1655,9 @@ define void @flat_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB38_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_sub_i32_e32 v3, vcc, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_sub v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB38_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory:
@@ -1678,43 +1665,17 @@ define void @flat_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB38_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_sub_u32_e32 v3, vcc, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_sub v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB38_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB38_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_sub_u32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_sub v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB38_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw sub ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -1725,69 +1686,29 @@ define i32 @flat_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
; GCN1-LABEL: flat_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB39_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_sub_i32_e32 v0, vcc, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_sub v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB39_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB39_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_sub_u32_e32 v0, vcc, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_sub v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB39_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB39_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_sub_u32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_sub v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB39_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw sub ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -2396,22 +2317,9 @@ define void @flat_atomic_and_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB48_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_and_b32_e32 v3, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_and v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB48_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_and_i32_noret_offset__amdgpu_no_remote_memory:
@@ -2419,43 +2327,17 @@ define void @flat_atomic_and_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB48_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_and_b32_e32 v3, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_and v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB48_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_and_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB48_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_and_b32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_and v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB48_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw and ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -2466,69 +2348,29 @@ define i32 @flat_atomic_and_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
; GCN1-LABEL: flat_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB49_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_and_b32_e32 v0, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_and v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB49_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB49_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_and_b32_e32 v0, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_and v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB49_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB49_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_and_b32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_and v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB49_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw and ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -3908,22 +3750,9 @@ define void @flat_atomic_or_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB68_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_or_b32_e32 v3, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_or v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB68_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_or_i32_noret_offset__amdgpu_no_remote_memory:
@@ -3931,43 +3760,17 @@ define void @flat_atomic_or_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB68_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_or_b32_e32 v3, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_or v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB68_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_or_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB68_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_or_b32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_or v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB68_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw or ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -3978,69 +3781,29 @@ define i32 @flat_atomic_or_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i32
; GCN1-LABEL: flat_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB69_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_or_b32_e32 v0, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_or v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB69_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB69_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_or_b32_e32 v0, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_or v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB69_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB69_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_or_b32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_or v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB69_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw or ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -4649,22 +4412,9 @@ define void @flat_xor_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB78_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_xor_b32_e32 v3, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_xor v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB78_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_xor_i32_noret_offset__amdgpu_no_remote_memory:
@@ -4672,43 +4422,17 @@ define void @flat_xor_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB78_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_xor_b32_e32 v3, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_xor v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB78_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_xor_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB78_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_xor_b32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_xor v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB78_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw xor ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -4719,69 +4443,29 @@ define i32 @flat_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
; GCN1-LABEL: flat_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB79_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_xor_b32_e32 v0, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_xor v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB79_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB79_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_xor_b32_e32 v0, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_xor v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB79_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB79_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_xor_b32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_xor v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB79_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw xor ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -5792,22 +5476,9 @@ define void @flat_max_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB92_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_max_i32_e32 v3, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_smax v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB92_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_max_i32_noret_offset__amdgpu_no_remote_memory:
@@ -5815,43 +5486,17 @@ define void @flat_max_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB92_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_max_i32_e32 v3, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_smax v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB92_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_max_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB92_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_max_i32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_smax v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB92_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw max ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -5862,69 +5507,29 @@ define i32 @flat_atomic_max_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
; GCN1-LABEL: flat_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB93_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_max_i32_e32 v0, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB93_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB93_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_max_i32_e32 v0, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB93_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB93_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_max_i32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_smax v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB93_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw max ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -6845,22 +6450,9 @@ define void @flat_umax_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB105_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_max_u32_e32 v3, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_umax v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB105_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_umax_i32_noret_offset__amdgpu_no_remote_memory:
@@ -6868,43 +6460,17 @@ define void @flat_umax_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB105_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_max_u32_e32 v3, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_umax v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB105_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_umax_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB105_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_max_u32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_umax v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB105_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw umax ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -6915,69 +6481,29 @@ define i32 @flat_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN1-LABEL: flat_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB106_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_max_u32_e32 v0, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB106_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB106_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_max_u32_e32 v0, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB106_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB106_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_max_u32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_umax v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB106_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw umax ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -7586,22 +7112,9 @@ define void @flat_umin_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB115_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_min_u32_e32 v3, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_umin v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB115_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_umin_i32_noret_offset__amdgpu_no_remote_memory:
@@ -7609,43 +7122,17 @@ define void @flat_umin_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB115_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_min_u32_e32 v3, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_umin v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB115_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_umin_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB115_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_min_u32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_umin v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB115_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw umin ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -7656,69 +7143,29 @@ define i32 @flat_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN1-LABEL: flat_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB116_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_min_u32_e32 v0, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB116_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB116_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_min_u32_e32 v0, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB116_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB116_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_min_u32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_umin v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB116_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw umin ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -8716,22 +8163,9 @@ define void @flat_min_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB129_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_min_i32_e32 v3, v4, v2
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_smin v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB129_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_min_i32_noret_offset__amdgpu_no_remote_memory:
@@ -8739,43 +8173,17 @@ define void @flat_min_i32_noret_offset__amdgpu_no_remote_memory(ptr %out, i32 %i
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB129_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_min_i32_e32 v3, v4, v2
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_smin v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB129_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_min_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB129_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_min_i32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_smin v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB129_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw min ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -8786,69 +8194,29 @@ define i32 @flat_atomic_min_i32_ret_offset__amdgpu_no_remote_memory(ptr %out, i3
; GCN1-LABEL: flat_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB130_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_min_i32_e32 v0, v1, v2
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB130_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB130_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_min_i32_e32 v0, v1, v2
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB130_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB130_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_min_i32_e32 v3, v4, v2
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_smin v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB130_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw min ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -9505,24 +8873,9 @@ define void @flat_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB139_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 1, v4
-; GCN1-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; GCN1-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_inc v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB139_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory:
@@ -9530,47 +8883,17 @@ define void @flat_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB139_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 1, v4
-; GCN2-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; GCN2-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_inc v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB139_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB139_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_add_u32_e32 v3, 1, v4
-; GCN3-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; GCN3-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_inc v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB139_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -9581,75 +8904,29 @@ define i32 @flat_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN1-LABEL: flat_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[4:5], 0
-; GCN1-NEXT: .LBB140_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 1, v1
-; GCN1-NEXT: v_cmp_lt_u32_e32 vcc, v1, v2
-; GCN1-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_inc v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB140_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[4:5], 0
-; GCN2-NEXT: .LBB140_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 1, v1
-; GCN2-NEXT: v_cmp_lt_u32_e32 vcc, v1, v2
-; GCN2-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_inc v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB140_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[4:5], 0
-; GCN3-NEXT: .LBB140_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_add_u32_e32 v3, 1, v4
-; GCN3-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; GCN3-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_inc v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB140_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw uinc_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -10366,26 +9643,9 @@ define void @flat_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v4, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB149_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN1-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; GCN1-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN1-NEXT: flat_atomic_dec v[0:1], v2
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v4, v3
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB149_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory:
@@ -10393,51 +9653,17 @@ define void @flat_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v4, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB149_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN2-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; GCN2-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; GCN2-NEXT: flat_atomic_dec v[0:1], v2
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v4, v3
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB149_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v4, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB149_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN3-NEXT: v_add_u32_e32 v3, -1, v4
-; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_dec v[0:1], v2 offset:16
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB149_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%tmp0 = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -10448,81 +9674,29 @@ define i32 @flat_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN1-LABEL: flat_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GCN1: ; %bb.0:
; GCN1-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v3, vcc, 16, v0
-; GCN1-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v0, v[3:4]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB150_1: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v1, v0
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, -1, v1
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; GCN1-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
-; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN1-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN1-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN1-NEXT: v_add_i32_e32 v0, vcc, 16, v0
+; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN1-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB150_1
-; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: s_setpc_b64 s[30:31]
;
; GCN2-LABEL: flat_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GCN2: ; %bb.0:
; GCN2-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; GCN2-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v0, v[3:4]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB150_1: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v1, v0
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, -1, v1
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; GCN2-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
-; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN2-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN2-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; GCN2-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN2-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB150_1
-; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: s_setpc_b64 s[30:31]
;
; GCN3-LABEL: flat_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GCN3: ; %bb.0:
; GCN3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN3-NEXT: flat_load_dword v3, v[0:1] offset:16
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB150_1: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v4, v3
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GCN3-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GCN3-NEXT: v_add_u32_e32 v3, -1, v4
-; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN3-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; GCN3-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] offset:16 glc
+; GCN3-NEXT: flat_atomic_dec v0, v[0:1], v2 offset:16 glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB150_1
-; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v0, v3
; GCN3-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr %out, i64 4
%result = atomicrmw udec_wrap ptr %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
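The tests above all exercise one pattern: an atomicrmw tagged with !amdgpu.no.remote.memory, which lets the backend keep the native flat_atomic_inc/flat_atomic_dec instead of expanding to the compare-exchange loop the minus lines show. A minimal standalone reduction of that shape, with illustrative names not taken from the patch:

; A sketch, assuming an invocation along the lines of:
;   llc -mtriple=amdgcn -mcpu=gfx900
define i32 @udec_wrap_no_remote(ptr %p, i32 %v) {
  %gep = getelementptr i32, ptr %p, i64 4
  ; The annotation asserts %gep never addresses memory on a remote device,
  ; so selecting the hardware udec_wrap atomic is acceptable here.
  %old = atomicrmw udec_wrap ptr %gep, i32 %v seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %old
}

!0 = !{}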
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
index ffe0596a95e33..d9a596283db1e 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
@@ -1299,7 +1299,7 @@ define amdgpu_kernel void @atomic_and_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1442,7 +1442,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -1594,7 +1594,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64_offset(ptr %out, i64 %in, i64 %
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1743,7 +1743,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -1875,7 +1875,7 @@ define amdgpu_kernel void @atomic_and_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile and ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2012,7 +2012,7 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile and ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -2157,7 +2157,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2300,7 +2300,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -2441,7 +2441,7 @@ define amdgpu_kernel void @atomic_sub_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2587,7 +2587,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -2742,7 +2742,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64_offset(ptr %out, i64 %in, i64 %
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2894,7 +2894,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -3029,7 +3029,7 @@ define amdgpu_kernel void @atomic_sub_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile sub ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3169,7 +3169,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile sub ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -3317,7 +3317,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3463,7 +3463,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -3604,7 +3604,7 @@ define amdgpu_kernel void @atomic_max_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3752,7 +3752,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -3907,7 +3907,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 %
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -4061,7 +4061,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -4196,7 +4196,7 @@ define amdgpu_kernel void @atomic_max_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -4338,7 +4338,7 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -4486,7 +4486,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -4634,7 +4634,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -4775,7 +4775,7 @@ define amdgpu_kernel void @atomic_umax_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -4923,7 +4923,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -5078,7 +5078,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -5232,7 +5232,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -5367,7 +5367,7 @@ define amdgpu_kernel void @atomic_umax_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -5509,7 +5509,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -5657,7 +5657,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -5805,7 +5805,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -5946,7 +5946,7 @@ define amdgpu_kernel void @atomic_min_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -6094,7 +6094,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -6249,7 +6249,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 %
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -6403,7 +6403,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -6538,7 +6538,7 @@ define amdgpu_kernel void @atomic_min_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -6680,7 +6680,7 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -6828,7 +6828,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -6976,7 +6976,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -7117,7 +7117,7 @@ define amdgpu_kernel void @atomic_umin_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7265,7 +7265,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -7420,7 +7420,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr %out, i64 %in, i64
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7574,7 +7574,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -7709,7 +7709,7 @@ define amdgpu_kernel void @atomic_umin_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -7851,7 +7851,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin ptr %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -7999,7 +7999,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -8147,7 +8147,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -8285,7 +8285,7 @@ define amdgpu_kernel void @atomic_or_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -8428,7 +8428,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -8580,7 +8580,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64_offset(ptr %out, i64 %in, i64 %i
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -8729,7 +8729,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -8861,7 +8861,7 @@ define amdgpu_kernel void @atomic_or_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile or ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -8998,7 +8998,7 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile or ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -9143,7 +9143,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -9286,7 +9286,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -10759,7 +10759,7 @@ define amdgpu_kernel void @atomic_xor_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -10902,7 +10902,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -11054,7 +11054,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64_offset(ptr %out, i64 %in, i64 %
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -11203,7 +11203,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -11335,7 +11335,7 @@ define amdgpu_kernel void @atomic_xor_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile xor ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -11472,7 +11472,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile xor ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -11617,7 +11617,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -11760,7 +11760,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -14107,7 +14107,7 @@ define amdgpu_kernel void @atomic_inc_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -14260,7 +14260,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -14422,7 +14422,7 @@ define amdgpu_kernel void @atomic_inc_i64_incr64_offset(ptr %out, i64 %in, i64 %
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -14581,7 +14581,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -14723,7 +14723,7 @@ define amdgpu_kernel void @atomic_inc_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s0
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile uinc_wrap ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -14870,7 +14870,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile uinc_wrap ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -15025,7 +15025,7 @@ define amdgpu_kernel void @atomic_inc_i64_incr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -15178,7 +15178,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -15335,7 +15335,7 @@ define amdgpu_kernel void @atomic_dec_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -15499,7 +15499,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
- %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -15670,7 +15670,7 @@ define amdgpu_kernel void @atomic_dec_i64_decr64_offset(ptr %out, i64 %in, i64 %
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -15840,7 +15840,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
%gep = getelementptr i64, ptr %ptr, i64 4
- %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -15991,7 +15991,7 @@ define amdgpu_kernel void @atomic_dec_i64(ptr %out, i64 %in) {
; GFX12-NEXT: scratch_store_b64 off, v[0:1], s4
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile udec_wrap ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -16149,7 +16149,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile udec_wrap ptr %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
@@ -16313,7 +16313,7 @@ define amdgpu_kernel void @atomic_dec_i64_decr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -16477,7 +16477,9 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
- %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store i64 %tmp0, ptr %out2
ret void
}
+
+!0 = !{}
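The flat_atomics_i64.ll changes above attach the complementary annotation: !amdgpu.no.fine.grained.memory asserts the location is never fine-grained host-shared memory, so these and/sub/max/min/umax/umin/or/xor/inc/dec tests can keep their existing single-instruction selection, with the required empty metadata node !0 added at the end of the file. The next file's hunks show the other side of the change: without the metadata, GFX7/GFX8 now expand such atomics into flat_load/flat_atomic_cmpswap_x2 loops. A minimal sketch of the annotated form, with a hypothetical function name:

define i64 @xor_i64_no_fine_grained(ptr %p, i64 %v) {
  ; With the annotation the backend may select a native 64-bit atomic xor;
  ; dropping it forces the cmpswap loop seen in the hunks below.
  %old = atomicrmw xor ptr %p, i64 %v syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
  ret i64 %old
}

!0 = !{}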
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll
index 3c1bc95cc38f6..757649ca592b3 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_noprivate.ll
@@ -458,13 +458,25 @@ define amdgpu_kernel void @atomic_and_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB8_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_and_i64_offset:
@@ -473,13 +485,25 @@ define amdgpu_kernel void @atomic_and_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB8_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_and_i64_offset:
@@ -501,40 +525,66 @@ entry:
define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_and_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_and_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB9_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_and_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_and_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB9_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_and_i64_ret_offset:
@@ -561,40 +611,64 @@ entry:
define amdgpu_kernel void @atomic_and_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_and_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB10_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_and_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB10_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_and_i64_addr64_offset:
@@ -624,42 +698,68 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_and_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB11_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_and_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_and_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB11_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_and_i64_ret_addr64_offset:
@@ -689,27 +789,55 @@ define amdgpu_kernel void @atomic_and_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_and_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB12_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_and_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB12_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_and_i64:
@@ -732,14 +860,29 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v7, v1
+; GFX7-NEXT: v_mov_b32_e32 v6, v0
+; GFX7-NEXT: v_and_b32_e32 v5, s5, v7
+; GFX7-NEXT: v_and_b32_e32 v4, s4, v6
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB13_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -749,14 +892,29 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v7, v1
+; GFX8-NEXT: v_mov_b32_e32 v6, v0
+; GFX8-NEXT: v_and_b32_e32 v5, s5, v7
+; GFX8-NEXT: v_and_b32_e32 v4, s4, v6
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB13_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -785,36 +943,60 @@ entry:
define amdgpu_kernel void @atomic_and_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_and_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB14_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_and_i64_addr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_and_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB14_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_and_i64_addr64:
@@ -843,38 +1025,64 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_and_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB15_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_and_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_and_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB15_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_and_i64_ret_addr64:
@@ -906,13 +1114,26 @@ define amdgpu_kernel void @atomic_sub_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_subrev_i32_e32 v0, vcc, s2, v2
+; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB16_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_sub_i64_offset:
@@ -921,13 +1142,26 @@ define amdgpu_kernel void @atomic_sub_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s2, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB16_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_sub_i64_offset:
@@ -949,40 +1183,68 @@ entry:
define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_sub_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v8, v3
+; GFX7-NEXT: v_mov_b32_e32 v7, v2
+; GFX7-NEXT: v_subrev_i32_e32 v5, vcc, s4, v7
+; GFX7-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[5:8] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[7:8]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB17_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_sub_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, v3
+; GFX8-NEXT: v_mov_b32_e32 v7, v2
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s4, v7
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[5:8] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[7:8]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB17_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_sub_i64_ret_offset:
@@ -1009,40 +1271,66 @@ entry:
define amdgpu_kernel void @atomic_sub_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_sub_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_subrev_i32_e32 v0, vcc, s2, v2
+; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB18_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_sub_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: .LBB18_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s2, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB18_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_sub_i64_addr64_offset:
@@ -1072,42 +1360,70 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v8, v3
+; GFX7-NEXT: v_mov_b32_e32 v7, v2
+; GFX7-NEXT: v_subrev_i32_e32 v5, vcc, s4, v7
+; GFX7-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[5:8] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[7:8]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB19_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_sub_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, v3
+; GFX8-NEXT: v_mov_b32_e32 v7, v2
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s4, v7
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[5:8] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[7:8]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB19_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_sub_i64_ret_addr64_offset:
@@ -1137,27 +1453,57 @@ define amdgpu_kernel void @atomic_sub_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_sub_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_subrev_i32_e32 v0, vcc, s2, v2
+; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB20_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_sub_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s2, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB20_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_sub_i64:
@@ -1180,14 +1526,30 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB21_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v8, v1
+; GFX7-NEXT: v_mov_b32_e32 v7, v0
+; GFX7-NEXT: v_subrev_i32_e32 v5, vcc, s4, v7
+; GFX7-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[5:8] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB21_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -1197,14 +1559,30 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB21_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v7, v0
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s4, v7
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[5:8] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB21_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -1233,36 +1611,62 @@ entry:
define amdgpu_kernel void @atomic_sub_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_sub_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_subrev_i32_e32 v0, vcc, s2, v2
+; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB22_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_sub_i64_addr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_subrev_u32_e32 v0, vcc, s2, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v3, v6, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB22_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_sub_i64_addr64:
@@ -1291,38 +1695,66 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v8, v3
+; GFX7-NEXT: v_mov_b32_e32 v7, v2
+; GFX7-NEXT: v_subrev_i32_e32 v5, vcc, s4, v7
+; GFX7-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[5:8] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[7:8]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB23_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_sub_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v8, v3
+; GFX8-NEXT: v_mov_b32_e32 v7, v2
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s4, v7
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[5:8] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[7:8]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB23_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_sub_i64_ret_addr64:
@@ -1354,12 +1786,27 @@ define amdgpu_kernel void @atomic_max_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB24_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_max_i64_offset:
@@ -1368,12 +1815,27 @@ define amdgpu_kernel void @atomic_max_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB24_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB24_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_max_i64_offset:
@@ -1395,40 +1857,70 @@ entry:
define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_max_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB25_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_max_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB25_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB25_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_max_i64_ret_offset:
@@ -1455,38 +1947,68 @@ entry:
define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_max_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB26_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_max_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB26_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_max_i64_addr64_offset:
@@ -1516,42 +2038,72 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB27_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_max_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB27_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_max_i64_ret_addr64_offset:
@@ -1581,25 +2133,59 @@ define amdgpu_kernel void @atomic_max_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_max_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB28_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_max_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB28_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_max_i64:
@@ -1622,16 +2208,33 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB29_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
@@ -1639,16 +2242,33 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB29_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
@@ -1675,34 +2295,64 @@ entry:
define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_max_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB30_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_max_i64_addr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB30_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_max_i64_addr64:
@@ -1731,38 +2381,68 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB31_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_max_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB31_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_max_i64_ret_addr64:
@@ -1794,12 +2474,27 @@ define amdgpu_kernel void @atomic_umax_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB32_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umax_i64_offset:
@@ -1808,12 +2503,27 @@ define amdgpu_kernel void @atomic_umax_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB32_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB32_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umax_i64_offset:
@@ -1835,40 +2545,70 @@ entry:
define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_umax_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB33_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umax_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB33_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB33_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umax_i64_ret_offset:
@@ -1895,38 +2635,68 @@ entry:
define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_umax_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB34_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umax_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB34_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB34_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umax_i64_addr64_offset:
@@ -1956,42 +2726,72 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB35_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umax_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB35_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB35_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umax_i64_ret_addr64_offset:
@@ -2021,25 +2821,59 @@ define amdgpu_kernel void @atomic_umax_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_umax_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB36_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umax_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB36_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB36_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umax_i64:
@@ -2062,16 +2896,33 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB37_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
@@ -2079,16 +2930,33 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB37_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB37_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
@@ -2115,34 +2983,64 @@ entry:
define amdgpu_kernel void @atomic_umax_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_umax_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: s_endpgm
-;
-; GFX8-LABEL: atomic_umax_i64_addr64:
-; GFX8: ; %bb.0: ; %entry
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB38_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: atomic_umax_i64_addr64:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB38_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB38_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umax_i64_addr64:
@@ -2171,38 +3069,68 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB39_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umax_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB39_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB39_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umax_i64_ret_addr64:
@@ -2234,12 +3162,27 @@ define amdgpu_kernel void @atomic_min_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB40_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_min_i64_offset:
@@ -2248,12 +3191,27 @@ define amdgpu_kernel void @atomic_min_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB40_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB40_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_min_i64_offset:
@@ -2275,40 +3233,70 @@ entry:
define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_min_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB41_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_min_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB41_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB41_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_min_i64_ret_offset:
@@ -2335,38 +3323,68 @@ entry:
define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_min_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB42_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_min_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB42_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB42_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_min_i64_addr64_offset:
@@ -2396,42 +3414,72 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB43_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_min_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB43_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB43_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_min_i64_ret_addr64_offset:
@@ -2461,25 +3509,59 @@ define amdgpu_kernel void @atomic_min_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_min_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB44_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_min_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB44_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB44_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_min_i64:
@@ -2502,16 +3584,33 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB45_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
@@ -2519,16 +3618,33 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB45_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB45_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
@@ -2555,34 +3671,64 @@ entry:
define amdgpu_kernel void @atomic_min_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_min_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB46_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_min_i64_addr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB46_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB46_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_min_i64_addr64:
@@ -2611,38 +3757,68 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB47_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_min_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB47_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB47_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_min_i64_ret_addr64:
@@ -2674,12 +3850,27 @@ define amdgpu_kernel void @atomic_umin_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB48_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umin_i64_offset:
@@ -2688,12 +3879,27 @@ define amdgpu_kernel void @atomic_umin_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB48_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB48_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umin_i64_offset:
@@ -2715,40 +3921,70 @@ entry:
define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_umin_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB49_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umin_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB49_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB49_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umin_i64_ret_offset:
@@ -2775,38 +4011,68 @@ entry:
define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_umin_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB50_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umin_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB50_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB50_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umin_i64_addr64_offset:
@@ -2836,42 +4102,72 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB51_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umin_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB51_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB51_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umin_i64_ret_addr64_offset:
@@ -2901,25 +4197,59 @@ define amdgpu_kernel void @atomic_umin_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_umin_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB52_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umin_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB52_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB52_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umin_i64:
@@ -2942,16 +4272,33 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB53_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
@@ -2959,16 +4306,33 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB53_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB53_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
@@ -2995,34 +4359,64 @@ entry:
define amdgpu_kernel void @atomic_umin_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_umin_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s3
+; GFX7-NEXT: v_mov_b32_e32 v7, s2
+; GFX7-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB54_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umin_i64_addr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s3
+; GFX8-NEXT: v_mov_b32_e32 v7, s2
+; GFX8-NEXT: .LBB54_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB54_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umin_i64_addr64:
@@ -3051,38 +4445,68 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB55_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_umin_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB55_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB55_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_umin_i64_ret_addr64:
@@ -3114,13 +4538,25 @@ define amdgpu_kernel void @atomic_or_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB56_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_or_i64_offset:
@@ -3129,13 +4565,25 @@ define amdgpu_kernel void @atomic_or_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB56_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB56_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_or_i64_offset:
@@ -3157,40 +4605,66 @@ entry:
define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_or_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_or_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_or_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB57_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_or_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB57_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_or_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_or_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB57_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_or_i64_ret_offset:
@@ -3217,40 +4691,64 @@ entry:
define amdgpu_kernel void @atomic_or_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_or_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB58_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_or_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB58_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB58_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_or_i64_addr64_offset:
@@ -3280,42 +4778,68 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_or_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_or_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB59_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_or_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB59_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_or_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_or_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB59_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_or_i64_ret_addr64_offset:
@@ -3345,27 +4869,55 @@ define amdgpu_kernel void @atomic_or_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_or_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB60_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_or_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB60_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_or_i64:
@@ -3388,14 +4940,29 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v7, v1
+; GFX7-NEXT: v_mov_b32_e32 v6, v0
+; GFX7-NEXT: v_or_b32_e32 v5, s5, v7
+; GFX7-NEXT: v_or_b32_e32 v4, s4, v6
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB61_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -3405,14 +4972,29 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v7, v1
+; GFX8-NEXT: v_mov_b32_e32 v6, v0
+; GFX8-NEXT: v_or_b32_e32 v5, s5, v7
+; GFX8-NEXT: v_or_b32_e32 v4, s4, v6
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB61_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -3441,36 +5023,60 @@ entry:
define amdgpu_kernel void @atomic_or_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_or_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB62_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_or_i64_addr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_or_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_or_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB62_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_or_i64_addr64:
@@ -3499,38 +5105,64 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_or_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_or_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB63_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_or_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_or_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_or_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB63_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_or_i64_ret_addr64:
@@ -4104,13 +5736,25 @@ define amdgpu_kernel void @atomic_xor_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB74_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB74_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_xor_i64_offset:
@@ -4119,13 +5763,25 @@ define amdgpu_kernel void @atomic_xor_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB74_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB74_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_xor_i64_offset:
@@ -4147,40 +5803,66 @@ entry:
define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_xor_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB75_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_xor_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_xor_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB75_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_xor_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB75_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_xor_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_xor_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB75_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_xor_i64_ret_offset:
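(The %atomicrmw.start / %atomicrmw.end names in the block comments come
from the generic AtomicExpand rewrite. Roughly, the pass turns the
atomicrmw into the following IR loop before instruction selection; this is
a sketch, with alignment and failure-ordering details elided:

  %init = load i64, ptr %gep, align 8
  br label %atomicrmw.start
atomicrmw.start:
  %loaded = phi i64 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  %new = xor i64 %loaded, %in
  %pair = cmpxchg ptr %gep, i64 %loaded, i64 %new seq_cst seq_cst
  %newloaded = extractvalue { i64, i1 } %pair, 0
  %success = extractvalue { i64, i1 } %pair, 1
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start
atomicrmw.end:

The s[0:1] mask together with s_andn2_b64 exec implements the backedge for
divergent lanes; the ret variants additionally restore exec after the loop
and store the returned value.)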
@@ -4207,40 +5889,64 @@ entry:
define amdgpu_kernel void @atomic_xor_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_xor_i64_addr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB76_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB76_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_xor_i64_addr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB76_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB76_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_xor_i64_addr64_offset:
@@ -4269,43 +5975,69 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX7-LABEL: atomic_xor_i64_ret_addr64_offset:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB77_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_xor_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_xor_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB77_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_xor_i64_ret_addr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB77_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_xor_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_xor_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB77_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_xor_i64_ret_addr64_offset:
@@ -4335,27 +6067,55 @@ define amdgpu_kernel void @atomic_xor_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_xor_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB78_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB78_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_xor_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB78_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB78_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_xor_i64:
@@ -4378,14 +6138,29 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: .LBB79_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v7, v1
+; GFX7-NEXT: v_mov_b32_e32 v6, v0
+; GFX7-NEXT: v_xor_b32_e32 v5, s5, v7
+; GFX7-NEXT: v_xor_b32_e32 v4, s4, v6
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB79_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX7-NEXT: v_mov_b32_e32 v2, s2
; GFX7-NEXT: v_mov_b32_e32 v3, s3
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -4395,14 +6170,29 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: .LBB79_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v7, v1
+; GFX8-NEXT: v_mov_b32_e32 v6, v0
+; GFX8-NEXT: v_xor_b32_e32 v5, s5, v7
+; GFX8-NEXT: v_xor_b32_e32 v4, s4, v6
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB79_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -4431,36 +6221,60 @@ entry:
define amdgpu_kernel void @atomic_xor_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_xor_i64_addr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB80_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX7-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB80_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_xor_i64_addr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB80_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v1, s3, v3
+; GFX8-NEXT: v_xor_b32_e32 v0, s2, v2
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB80_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_xor_i64_addr64:
@@ -4489,38 +6303,64 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB81_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_xor_b32_e32 v3, s5, v5
+; GFX7-NEXT: v_xor_b32_e32 v2, s4, v4
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB81_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_xor_i64_ret_addr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB81_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_xor_b32_e32 v3, s5, v5
+; GFX8-NEXT: v_xor_b32_e32 v2, s4, v4
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB81_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_xor_i64_ret_addr64:
@@ -5920,13 +7760,28 @@ define amdgpu_kernel void @atomic_inc_i64_offset(ptr %out, i64 %in) {
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB107_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB107_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64_offset:
@@ -5935,13 +7790,28 @@ define amdgpu_kernel void @atomic_inc_i64_offset(ptr %out, i64 %in) {
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB107_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 1, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB107_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64_offset:
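(The inc tests expand atomicrmw uinc_wrap, whose LangRef semantics are
new = (old u>= in) ? 0 : old + 1. The per-iteration value computation in
the loops above corresponds to this IR sketch:

  %add = add i64 %loaded, 1
  %cmp = icmp uge i64 %loaded, %in
  %new = select i1 %cmp, i64 0, i64 %add

The assembly uses the inverted compare, v_cmp_gt_u64 with the operands
swapped, so the v_cndmask pair selects the incremented value when
in u> old and 0 otherwise.)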
@@ -5963,40 +7833,72 @@ entry:
define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_inc_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB108_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, 1, v4
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB108_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB108_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v4
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB108_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64_ret_offset:
@@ -6023,40 +7925,70 @@ entry:
define amdgpu_kernel void @atomic_inc_i64_incr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_inc_i64_incr64_offset:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB109_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB109_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64_incr64_offset:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB109_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 1, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB109_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64_incr64_offset:
@@ -6086,42 +8018,74 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB110_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, 1, v4
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB110_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64_ret_incr64_offset:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB110_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v4
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB110_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64_ret_incr64_offset:
@@ -6151,27 +8115,61 @@ define amdgpu_kernel void @atomic_inc_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_inc_i64:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: .LBB111_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB111_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: .LBB111_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 1, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB111_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64:
@@ -6194,34 +8192,66 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: .LBB112_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, 1, v4
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB112_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64_ret:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: .LBB112_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v4
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB112_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64_ret:
@@ -6247,36 +8277,66 @@ entry:
define amdgpu_kernel void @atomic_inc_i64_incr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_inc_i64_incr64:
; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s4
+; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB113_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB113_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64_incr64:
; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB113_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 1, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB113_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64_incr64:
@@ -6305,38 +8365,70 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX7-NEXT: s_add_u32 s0, s0, s6
+; GFX7-NEXT: s_addc_u32 s1, s1, s7
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: .LBB114_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v5, v3
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_i32_e32 v2, vcc, 1, v4
+; GFX7-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX7-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX7-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX7-NEXT: s_cbranch_execnz .LBB114_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v0, s2
+; GFX7-NEXT: v_mov_b32_e32 v1, s3
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_inc_i64_ret_incr64:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX8-NEXT: s_add_u32 s0, s0, s6
+; GFX8-NEXT: s_addc_u32 s1, s1, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[0:1], 0
+; GFX8-NEXT: .LBB114_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v3
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v4
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX8-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX8-NEXT: s_cbranch_execnz .LBB114_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_inc_i64_ret_incr64:
@@ -6364,32 +8456,70 @@ entry:
define amdgpu_kernel void @atomic_dec_i64_offset(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_dec_i64_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: s_add_u32 s0, s0, 32
-; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
+; GFX7-NEXT: s_add_u32 s0, s4, 32
+; GFX7-NEXT: s_addc_u32 s1, s5, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s7
+; GFX7-NEXT: v_mov_b32_e32 v7, s6
+; GFX7-NEXT: .LBB115_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX7-NEXT: v_add_i32_e64 v0, s[2:3], -1, v2
+; GFX7-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB115_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_add_u32 s0, s0, 32
-; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
+; GFX8-NEXT: s_add_u32 s0, s4, 32
+; GFX8-NEXT: s_addc_u32 s1, s5, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s7
+; GFX8-NEXT: v_mov_b32_e32 v7, s6
+; GFX8-NEXT: .LBB115_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX8-NEXT: v_add_u32_e64 v0, s[2:3], -1, v2
+; GFX8-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB115_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64_offset:
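(The dec tests expand atomicrmw udec_wrap, with semantics
new = (old == 0 || old u> in) ? in : old - 1. The per-iteration
computation is equivalent to this IR sketch:

  %dec = add i64 %loaded, -1
  %iszero = icmp eq i64 %loaded, 0
  %above = icmp ugt i64 %loaded, %in
  %reset = or i1 %iszero, %above
  %new = select i1 %reset, i64 %in, i64 %dec

which matches the v_cmp_eq_u64/v_cmp_lt_u64 pair ORed into vcc before the
v_cndmask instructions select between old - 1 and the original input.)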
@@ -6411,40 +8541,80 @@ entry:
define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_dec_i64_ret_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s6
-; GFX7-NEXT: s_add_u32 s0, s0, 32
-; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: s_add_u32 s0, s8, 32
+; GFX7-NEXT: s_addc_u32 s1, s9, 0
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB116_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[4:5], v[8:9]
+; GFX7-NEXT: v_add_i32_e64 v2, s[2:3], -1, v8
+; GFX7-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB116_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, s10
+; GFX7-NEXT: v_mov_b32_e32 v1, s11
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64_ret_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
-; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s6
-; GFX8-NEXT: s_add_u32 s0, s0, 32
-; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: s_add_u32 s0, s8, 32
+; GFX8-NEXT: s_addc_u32 s1, s9, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB116_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[4:5], v[8:9]
+; GFX8-NEXT: v_add_u32_e64 v2, s[2:3], -1, v8
+; GFX8-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB116_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, s10
+; GFX8-NEXT: v_mov_b32_e32 v1, s11
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64_ret_offset:
@@ -6471,40 +8641,78 @@ entry:
define amdgpu_kernel void @atomic_dec_i64_decr64_offset(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_dec_i64_decr64_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
+; GFX7-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
+; GFX7-NEXT: s_add_u32 s0, s4, s0
+; GFX7-NEXT: s_addc_u32 s1, s5, s1
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s7
+; GFX7-NEXT: v_mov_b32_e32 v7, s6
+; GFX7-NEXT: .LBB117_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX7-NEXT: v_add_i32_e64 v0, s[2:3], -1, v2
+; GFX7-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB117_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64_decr64_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
+; GFX8-NEXT: s_add_u32 s0, s4, s0
+; GFX8-NEXT: s_addc_u32 s1, s5, s1
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s7
+; GFX8-NEXT: v_mov_b32_e32 v7, s6
+; GFX8-NEXT: .LBB117_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX8-NEXT: v_add_u32_e64 v0, s[2:3], -1, v2
+; GFX8-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB117_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64_decr64_offset:
@@ -6532,44 +8740,84 @@ entry:
define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_dec_i64_ret_decr64_offset:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
+; GFX7-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
+; GFX7-NEXT: s_add_u32 s0, s4, s0
+; GFX7-NEXT: s_addc_u32 s1, s5, s1
; GFX7-NEXT: s_add_u32 s0, s0, 32
; GFX7-NEXT: s_addc_u32 s1, s1, 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s9
+; GFX7-NEXT: v_mov_b32_e32 v5, s8
+; GFX7-NEXT: .LBB118_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[8:9], v[8:9]
+; GFX7-NEXT: v_add_i32_e64 v2, s[2:3], -1, v8
+; GFX7-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB118_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, s6
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64_ret_decr64_offset:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
+; GFX8-NEXT: s_add_u32 s0, s4, s0
+; GFX8-NEXT: s_addc_u32 s1, s5, s1
; GFX8-NEXT: s_add_u32 s0, s0, 32
; GFX8-NEXT: s_addc_u32 s1, s1, 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s9
+; GFX8-NEXT: v_mov_b32_e32 v5, s8
+; GFX8-NEXT: .LBB118_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[8:9], v[8:9]
+; GFX8-NEXT: v_add_u32_e64 v2, s[2:3], -1, v8
+; GFX8-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB118_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64_ret_decr64_offset:
@@ -6598,28 +8846,70 @@ entry:
define amdgpu_kernel void @atomic_dec_i64(ptr %out, i64 %in) {
; GFX7-LABEL: atomic_dec_i64:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s0
-; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v4, s4
+; GFX7-NEXT: v_mov_b32_e32 v6, s7
+; GFX7-NEXT: v_mov_b32_e32 v7, s6
+; GFX7-NEXT: v_mov_b32_e32 v5, s5
+; GFX7-NEXT: .LBB119_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX7-NEXT: v_add_i32_e64 v0, s[2:3], -1, v2
+; GFX7-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX7-NEXT: s_cbranch_execnz .LBB119_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b64 s[8:9], 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v6, s7
+; GFX8-NEXT: v_mov_b32_e32 v7, s6
+; GFX8-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NEXT: .LBB119_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX8-NEXT: v_add_u32_e64 v0, s[2:3], -1, v2
+; GFX8-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_execnz .LBB119_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64:
@@ -6640,36 +8930,76 @@ entry:
define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX7-LABEL: atomic_dec_i64_ret:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s0
-; GFX7-NEXT: v_mov_b32_e32 v1, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s4
-; GFX7-NEXT: v_mov_b32_e32 v3, s5
-; GFX7-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX7-NEXT: s_mov_b64 s[6:7], 0
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v0, s8
+; GFX7-NEXT: v_mov_b32_e32 v1, s9
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_mov_b32_e32 v4, s5
+; GFX7-NEXT: v_mov_b32_e32 v5, s4
+; GFX7-NEXT: .LBB120_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[4:5], v[8:9]
+; GFX7-NEXT: v_add_i32_e64 v2, s[2:3], -1, v8
+; GFX7-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX7-NEXT: s_cbranch_execnz .LBB120_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, s10
+; GFX7-NEXT: v_mov_b32_e32 v1, s11
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64_ret:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: v_mov_b32_e32 v1, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_mov_b32_e32 v3, s5
-; GFX8-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
+; GFX8-NEXT: s_mov_b64 s[6:7], 0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v0, s8
+; GFX8-NEXT: v_mov_b32_e32 v1, s9
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v4, s5
+; GFX8-NEXT: v_mov_b32_e32 v5, s4
+; GFX8-NEXT: .LBB120_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[4:5], v[8:9]
+; GFX8-NEXT: v_add_u32_e64 v2, s[2:3], -1, v8
+; GFX8-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX8-NEXT: s_cbranch_execnz .LBB120_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, s10
+; GFX8-NEXT: v_mov_b32_e32 v1, s11
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64_ret:
@@ -6695,36 +9025,74 @@ entry:
define amdgpu_kernel void @atomic_dec_i64_decr64(ptr %out, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_dec_i64_decr64:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s2
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s2
-; GFX7-NEXT: s_addc_u32 s1, s1, s3
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
+; GFX7-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
+; GFX7-NEXT: s_add_u32 s0, s4, s0
+; GFX7-NEXT: s_addc_u32 s1, s5, s1
+; GFX7-NEXT: v_mov_b32_e32 v5, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v6, s7
+; GFX7-NEXT: v_mov_b32_e32 v7, s6
+; GFX7-NEXT: .LBB121_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX7-NEXT: v_add_i32_e64 v0, s[2:3], -1, v2
+; GFX7-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v2, v0
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB121_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64_decr64:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s2
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[4:5], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s2
-; GFX8-NEXT: s_addc_u32 s1, s1, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
+; GFX8-NEXT: s_add_u32 s0, s4, s0
+; GFX8-NEXT: s_addc_u32 s1, s5, s1
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mov_b32_e32 v6, s7
+; GFX8-NEXT: v_mov_b32_e32 v7, s6
+; GFX8-NEXT: .LBB121_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[6:7], v[2:3]
+; GFX8-NEXT: v_add_u32_e64 v0, s[2:3], -1, v2
+; GFX8-NEXT: v_addc_u32_e64 v1, s[2:3], -1, v3, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v7, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v2, v0
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB121_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64_decr64:
@@ -6751,40 +9119,80 @@ entry:
define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
; GFX7-LABEL: atomic_dec_i64_ret_decr64:
; GFX7: ; %bb.0: ; %entry
-; GFX7-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GFX7-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v0, s4
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX7-NEXT: s_add_u32 s0, s0, s4
-; GFX7-NEXT: s_addc_u32 s1, s1, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX7-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
+; GFX7-NEXT: s_add_u32 s0, s4, s0
+; GFX7-NEXT: s_addc_u32 s1, s5, s1
+; GFX7-NEXT: v_mov_b32_e32 v0, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s1
+; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_mov_b32_e32 v4, s9
+; GFX7-NEXT: v_mov_b32_e32 v5, s8
+; GFX7-NEXT: .LBB122_1: ; %atomicrmw.start
+; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mov_b32_e32 v9, v3
+; GFX7-NEXT: v_mov_b32_e32 v8, v2
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX7-NEXT: v_cmp_lt_u64_e64 s[0:1], s[8:9], v[8:9]
+; GFX7-NEXT: v_add_i32_e64 v2, s[2:3], -1, v8
+; GFX7-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX7-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX7-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_mov_b32_e32 v3, s3
-; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX7-NEXT: s_cbranch_execnz .LBB122_1
+; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, s6
+; GFX7-NEXT: v_mov_b32_e32 v1, s7
+; GFX7-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: atomic_dec_i64_ret_decr64:
; GFX8: ; %bb.0: ; %entry
-; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; GFX8-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s4
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
-; GFX8-NEXT: s_add_u32 s0, s0, s4
-; GFX8-NEXT: s_addc_u32 s1, s1, s5
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
+; GFX8-NEXT: s_add_u32 s0, s4, s0
+; GFX8-NEXT: s_addc_u32 s1, s5, s1
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: v_mov_b32_e32 v4, s9
+; GFX8-NEXT: v_mov_b32_e32 v5, s8
+; GFX8-NEXT: .LBB122_1: ; %atomicrmw.start
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v9, v3
+; GFX8-NEXT: v_mov_b32_e32 v8, v2
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[8:9], v[8:9]
+; GFX8-NEXT: v_add_u32_e64 v2, s[2:3], -1, v8
+; GFX8-NEXT: v_addc_u32_e64 v3, s[2:3], -1, v9, s[2:3]
+; GFX8-NEXT: s_or_b64 vcc, vcc, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v5, vcc
+; GFX8-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s3
-; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB122_1
+; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
;
; GFX12-LABEL: atomic_dec_i64_ret_decr64:
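
As a minimal sketch of the IR shape the __amdgpu_no_remote_memory tests in the
next file cover (not part of the patch itself; the metadata spelling
!amdgpu.no.remote.memory and the function name below are assumptions inferred
from the test names): an atomicrmw carrying the annotation can keep the native
instruction, e.g. flat_atomic_sub_x2, instead of being expanded into a
compare-exchange loop.

define void @flat_atomic_sub_i64_noret__sketch(ptr %out, i64 %in) {
  ; The annotation asserts this access never targets remote memory, so the
  ; expansion logic may select the hardware atomic rather than emitting the
  ; load/cmpxchg retry loop seen in the unannotated tests above.
  %old = atomicrmw sub ptr %out, i64 %in seq_cst, align 8, !amdgpu.no.remote.memory !0
  ret void
}
!0 = !{}
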
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
index 6bff8f558af71..524100c5b7a25 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
@@ -5083,40 +5083,21 @@ define void @flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB38_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB38_6
+; GCN1-NEXT: s_cbranch_execnz .LBB38_4
; GCN1-NEXT: .LBB38_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB38_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB38_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_sub_i32_e32 v4, vcc, v6, v2
-; GCN1-NEXT: v_subb_u32_e32 v5, vcc, v7, v3, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB38_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr2
-; GCN1-NEXT: ; implicit-def: $vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB38_2
-; GCN1-NEXT: .LBB38_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB38_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN1-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
@@ -5144,40 +5125,21 @@ define void @flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB38_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB38_6
+; GCN2-NEXT: s_cbranch_execnz .LBB38_4
; GCN2-NEXT: .LBB38_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB38_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB38_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_sub_u32_e32 v4, vcc, v6, v2
-; GCN2-NEXT: v_subb_u32_e32 v5, vcc, v7, v3, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB38_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr2
-; GCN2-NEXT: ; implicit-def: $vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB38_2
-; GCN2-NEXT: .LBB38_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB38_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN2-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
@@ -5203,37 +5165,21 @@ define void @flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB38_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB38_6
+; GCN3-NEXT: s_cbranch_execnz .LBB38_4
; GCN3-NEXT: .LBB38_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB38_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB38_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
-; GCN3-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB38_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr2
-; GCN3-NEXT: ; implicit-def: $vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB38_2
-; GCN3-NEXT: .LBB38_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB38_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
@@ -5266,40 +5212,21 @@ define i64 @flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB39_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB39_6
+; GCN1-NEXT: s_cbranch_execnz .LBB39_4
; GCN1-NEXT: .LBB39_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB39_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB39_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_sub_i32_e32 v6, vcc, v8, v2
-; GCN1-NEXT: v_subb_u32_e32 v7, vcc, v9, v3, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_sub_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB39_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr2
-; GCN1-NEXT: ; implicit-def: $vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB39_2
-; GCN1-NEXT: .LBB39_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB39_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -5328,40 +5255,21 @@ define i64 @flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB39_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB39_6
+; GCN2-NEXT: s_cbranch_execnz .LBB39_4
; GCN2-NEXT: .LBB39_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB39_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB39_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_sub_u32_e32 v6, vcc, v8, v2
-; GCN2-NEXT: v_subb_u32_e32 v7, vcc, v9, v3, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_sub_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB39_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr2
-; GCN2-NEXT: ; implicit-def: $vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB39_2
-; GCN2-NEXT: .LBB39_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB39_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -5388,37 +5296,21 @@ define i64 @flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB39_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB39_6
+; GCN3-NEXT: s_cbranch_execnz .LBB39_4
; GCN3-NEXT: .LBB39_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB39_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB39_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_sub_co_u32_e32 v6, vcc, v8, v2
-; GCN3-NEXT: v_subb_co_u32_e32 v7, vcc, v9, v3, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_sub_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB39_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr2
-; GCN3-NEXT: ; implicit-def: $vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB39_2
-; GCN3-NEXT: .LBB39_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB39_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -6878,40 +6770,21 @@ define void @flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB48_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB48_6
+; GCN1-NEXT: s_cbranch_execnz .LBB48_4
; GCN1-NEXT: .LBB48_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB48_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB48_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_and_b32_e32 v5, v7, v3
-; GCN1-NEXT: v_and_b32_e32 v4, v6, v2
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB48_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr3
-; GCN1-NEXT: ; implicit-def: $vgpr2
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB48_2
-; GCN1-NEXT: .LBB48_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB48_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v1, vcc, 4, v0
@@ -6939,40 +6812,21 @@ define void @flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB48_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB48_6
+; GCN2-NEXT: s_cbranch_execnz .LBB48_4
; GCN2-NEXT: .LBB48_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB48_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB48_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_and_b32_e32 v5, v7, v3
-; GCN2-NEXT: v_and_b32_e32 v4, v6, v2
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB48_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr3
-; GCN2-NEXT: ; implicit-def: $vgpr2
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB48_2
-; GCN2-NEXT: .LBB48_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB48_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v1, vcc, 4, v0
@@ -6998,37 +6852,21 @@ define void @flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB48_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB48_6
+; GCN3-NEXT: s_cbranch_execnz .LBB48_4
; GCN3-NEXT: .LBB48_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB48_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB48_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_and_b32_e32 v5, v7, v3
-; GCN3-NEXT: v_and_b32_e32 v4, v6, v2
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB48_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr3
-; GCN3-NEXT: ; implicit-def: $vgpr2
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB48_2
-; GCN3-NEXT: .LBB48_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB48_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen offset:4
@@ -7061,40 +6899,21 @@ define i64 @flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB49_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB49_6
+; GCN1-NEXT: s_cbranch_execnz .LBB49_4
; GCN1-NEXT: .LBB49_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB49_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB49_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_and_b32_e32 v7, v9, v3
-; GCN1-NEXT: v_and_b32_e32 v6, v8, v2
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_and_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB49_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr3
-; GCN1-NEXT: ; implicit-def: $vgpr2
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB49_2
-; GCN1-NEXT: .LBB49_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB49_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -7123,40 +6942,21 @@ define i64 @flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB49_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB49_6
+; GCN2-NEXT: s_cbranch_execnz .LBB49_4
; GCN2-NEXT: .LBB49_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB49_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB49_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_and_b32_e32 v7, v9, v3
-; GCN2-NEXT: v_and_b32_e32 v6, v8, v2
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_and_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB49_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr3
-; GCN2-NEXT: ; implicit-def: $vgpr2
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB49_2
-; GCN2-NEXT: .LBB49_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB49_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -7183,37 +6983,21 @@ define i64 @flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB49_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB49_6
+; GCN3-NEXT: s_cbranch_execnz .LBB49_4
; GCN3-NEXT: .LBB49_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB49_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB49_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_and_b32_e32 v7, v9, v3
-; GCN3-NEXT: v_and_b32_e32 v6, v8, v2
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_and_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB49_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr3
-; GCN3-NEXT: ; implicit-def: $vgpr2
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB49_2
-; GCN3-NEXT: .LBB49_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB49_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
@@ -10588,40 +10372,21 @@ define void @flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB68_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB68_6
+; GCN1-NEXT: s_cbranch_execnz .LBB68_4
; GCN1-NEXT: .LBB68_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB68_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB68_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_or_b32_e32 v5, v7, v3
-; GCN1-NEXT: v_or_b32_e32 v4, v6, v2
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB68_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr3
-; GCN1-NEXT: ; implicit-def: $vgpr2
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB68_2
-; GCN1-NEXT: .LBB68_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB68_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v1, vcc, 4, v0
@@ -10649,40 +10414,21 @@ define void @flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB68_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB68_6
+; GCN2-NEXT: s_cbranch_execnz .LBB68_4
; GCN2-NEXT: .LBB68_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB68_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB68_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_or_b32_e32 v5, v7, v3
-; GCN2-NEXT: v_or_b32_e32 v4, v6, v2
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB68_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr3
-; GCN2-NEXT: ; implicit-def: $vgpr2
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB68_2
-; GCN2-NEXT: .LBB68_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB68_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v1, vcc, 4, v0
@@ -10708,37 +10454,21 @@ define void @flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB68_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB68_6
+; GCN3-NEXT: s_cbranch_execnz .LBB68_4
; GCN3-NEXT: .LBB68_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB68_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB68_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_or_b32_e32 v5, v7, v3
-; GCN3-NEXT: v_or_b32_e32 v4, v6, v2
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB68_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr3
-; GCN3-NEXT: ; implicit-def: $vgpr2
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB68_2
-; GCN3-NEXT: .LBB68_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB68_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen offset:4
@@ -10771,40 +10501,21 @@ define i64 @flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i64
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB69_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB69_6
+; GCN1-NEXT: s_cbranch_execnz .LBB69_4
; GCN1-NEXT: .LBB69_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB69_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB69_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_or_b32_e32 v7, v9, v3
-; GCN1-NEXT: v_or_b32_e32 v6, v8, v2
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_or_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB69_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr3
-; GCN1-NEXT: ; implicit-def: $vgpr2
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB69_2
-; GCN1-NEXT: .LBB69_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB69_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -10833,40 +10544,21 @@ define i64 @flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i64
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB69_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB69_6
+; GCN2-NEXT: s_cbranch_execnz .LBB69_4
; GCN2-NEXT: .LBB69_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB69_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB69_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_or_b32_e32 v7, v9, v3
-; GCN2-NEXT: v_or_b32_e32 v6, v8, v2
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_or_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB69_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr3
-; GCN2-NEXT: ; implicit-def: $vgpr2
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB69_2
-; GCN2-NEXT: .LBB69_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB69_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -10893,37 +10585,21 @@ define i64 @flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i64
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB69_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB69_6
+; GCN3-NEXT: s_cbranch_execnz .LBB69_4
; GCN3-NEXT: .LBB69_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB69_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB69_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_or_b32_e32 v7, v9, v3
-; GCN3-NEXT: v_or_b32_e32 v6, v8, v2
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_or_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB69_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr3
-; GCN3-NEXT: ; implicit-def: $vgpr2
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB69_2
-; GCN3-NEXT: .LBB69_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB69_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
@@ -12383,40 +12059,21 @@ define void @flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB78_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB78_6
+; GCN1-NEXT: s_cbranch_execnz .LBB78_4
; GCN1-NEXT: .LBB78_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB78_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB78_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_xor_b32_e32 v5, v7, v3
-; GCN1-NEXT: v_xor_b32_e32 v4, v6, v2
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB78_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr3
-; GCN1-NEXT: ; implicit-def: $vgpr2
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB78_2
-; GCN1-NEXT: .LBB78_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB78_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v1, vcc, 4, v0
@@ -12444,40 +12101,21 @@ define void @flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB78_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB78_6
+; GCN2-NEXT: s_cbranch_execnz .LBB78_4
; GCN2-NEXT: .LBB78_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB78_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB78_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_xor_b32_e32 v5, v7, v3
-; GCN2-NEXT: v_xor_b32_e32 v4, v6, v2
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB78_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr3
-; GCN2-NEXT: ; implicit-def: $vgpr2
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB78_2
-; GCN2-NEXT: .LBB78_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB78_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v1, vcc, 4, v0
@@ -12503,37 +12141,21 @@ define void @flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB78_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB78_6
+; GCN3-NEXT: s_cbranch_execnz .LBB78_4
; GCN3-NEXT: .LBB78_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB78_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB78_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_xor_b32_e32 v5, v7, v3
-; GCN3-NEXT: v_xor_b32_e32 v4, v6, v2
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB78_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr3
-; GCN3-NEXT: ; implicit-def: $vgpr2
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB78_2
-; GCN3-NEXT: .LBB78_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB78_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen offset:4
@@ -12566,40 +12188,21 @@ define i64 @flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB79_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB79_6
+; GCN1-NEXT: s_cbranch_execnz .LBB79_4
; GCN1-NEXT: .LBB79_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB79_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB79_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_xor_b32_e32 v7, v9, v3
-; GCN1-NEXT: v_xor_b32_e32 v6, v8, v2
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_xor_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB79_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr3
-; GCN1-NEXT: ; implicit-def: $vgpr2
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB79_2
-; GCN1-NEXT: .LBB79_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB79_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -12628,40 +12231,21 @@ define i64 @flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB79_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB79_6
+; GCN2-NEXT: s_cbranch_execnz .LBB79_4
; GCN2-NEXT: .LBB79_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB79_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB79_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_xor_b32_e32 v7, v9, v3
-; GCN2-NEXT: v_xor_b32_e32 v6, v8, v2
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_xor_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB79_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr3
-; GCN2-NEXT: ; implicit-def: $vgpr2
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB79_2
-; GCN2-NEXT: .LBB79_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB79_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -12688,37 +12272,21 @@ define i64 @flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB79_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB79_6
+; GCN3-NEXT: s_cbranch_execnz .LBB79_4
; GCN3-NEXT: .LBB79_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB79_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB79_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_xor_b32_e32 v7, v9, v3
-; GCN3-NEXT: v_xor_b32_e32 v6, v8, v2
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_xor_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB79_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr3
-; GCN3-NEXT: ; implicit-def: $vgpr2
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB79_2
-; GCN3-NEXT: .LBB79_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB79_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
@@ -15090,40 +14658,21 @@ define void @flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB92_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB92_6
+; GCN1-NEXT: s_cbranch_execnz .LBB92_4
; GCN1-NEXT: .LBB92_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB92_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB92_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB92_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB92_2
-; GCN1-NEXT: .LBB92_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB92_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -15151,40 +14700,21 @@ define void @flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB92_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB92_6
+; GCN2-NEXT: s_cbranch_execnz .LBB92_4
; GCN2-NEXT: .LBB92_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB92_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB92_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB92_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB92_2
-; GCN2-NEXT: .LBB92_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB92_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -15210,37 +14740,21 @@ define void @flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB92_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB92_6
+; GCN3-NEXT: s_cbranch_execnz .LBB92_4
; GCN3-NEXT: .LBB92_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB92_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB92_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB92_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB92_2
-; GCN3-NEXT: .LBB92_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB92_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -15273,40 +14787,21 @@ define i64 @flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB93_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB93_6
+; GCN1-NEXT: s_cbranch_execnz .LBB93_4
; GCN1-NEXT: .LBB93_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB93_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB93_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB93_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB93_2
-; GCN1-NEXT: .LBB93_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB93_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -15335,40 +14830,21 @@ define i64 @flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB93_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB93_6
+; GCN2-NEXT: s_cbranch_execnz .LBB93_4
; GCN2-NEXT: .LBB93_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB93_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB93_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB93_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB93_2
-; GCN2-NEXT: .LBB93_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB93_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -15395,37 +14871,21 @@ define i64 @flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB93_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB93_6
+; GCN3-NEXT: s_cbranch_execnz .LBB93_4
; GCN3-NEXT: .LBB93_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB93_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB93_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_smax_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB93_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB93_2
-; GCN3-NEXT: .LBB93_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB93_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -17588,40 +17048,21 @@ define void @flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB105_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB105_6
+; GCN1-NEXT: s_cbranch_execnz .LBB105_4
; GCN1-NEXT: .LBB105_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB105_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB105_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB105_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB105_2
-; GCN1-NEXT: .LBB105_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB105_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -17649,40 +17090,21 @@ define void @flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB105_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB105_6
+; GCN2-NEXT: s_cbranch_execnz .LBB105_4
; GCN2-NEXT: .LBB105_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB105_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB105_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB105_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB105_2
-; GCN2-NEXT: .LBB105_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB105_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -17708,37 +17130,21 @@ define void @flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB105_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB105_6
+; GCN3-NEXT: s_cbranch_execnz .LBB105_4
; GCN3-NEXT: .LBB105_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB105_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB105_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB105_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB105_2
-; GCN3-NEXT: .LBB105_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB105_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -17771,40 +17177,21 @@ define i64 @flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB106_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB106_6
+; GCN1-NEXT: s_cbranch_execnz .LBB106_4
; GCN1-NEXT: .LBB106_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB106_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB106_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB106_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB106_2
-; GCN1-NEXT: .LBB106_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB106_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -17833,40 +17220,21 @@ define i64 @flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB106_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB106_6
+; GCN2-NEXT: s_cbranch_execnz .LBB106_4
; GCN2-NEXT: .LBB106_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB106_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB106_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB106_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB106_2
-; GCN2-NEXT: .LBB106_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB106_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -17893,37 +17261,21 @@ define i64 @flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB106_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB106_6
+; GCN3-NEXT: s_cbranch_execnz .LBB106_4
; GCN3-NEXT: .LBB106_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB106_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB106_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_umax_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB106_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB106_2
-; GCN3-NEXT: .LBB106_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB106_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -19443,40 +18795,21 @@ define void @flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB115_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB115_6
+; GCN1-NEXT: s_cbranch_execnz .LBB115_4
; GCN1-NEXT: .LBB115_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB115_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB115_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB115_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB115_2
-; GCN1-NEXT: .LBB115_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB115_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -19504,40 +18837,21 @@ define void @flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB115_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB115_6
+; GCN2-NEXT: s_cbranch_execnz .LBB115_4
; GCN2-NEXT: .LBB115_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB115_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB115_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB115_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB115_2
-; GCN2-NEXT: .LBB115_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB115_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -19563,37 +18877,21 @@ define void @flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB115_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB115_6
+; GCN3-NEXT: s_cbranch_execnz .LBB115_4
; GCN3-NEXT: .LBB115_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB115_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB115_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB115_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB115_2
-; GCN3-NEXT: .LBB115_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB115_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -19626,40 +18924,21 @@ define i64 @flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB116_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB116_6
+; GCN1-NEXT: s_cbranch_execnz .LBB116_4
; GCN1-NEXT: .LBB116_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB116_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB116_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB116_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB116_2
-; GCN1-NEXT: .LBB116_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB116_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -19688,40 +18967,21 @@ define i64 @flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB116_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB116_6
+; GCN2-NEXT: s_cbranch_execnz .LBB116_4
; GCN2-NEXT: .LBB116_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB116_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB116_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB116_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB116_2
-; GCN2-NEXT: .LBB116_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB116_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -19748,37 +19008,21 @@ define i64 @flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB116_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB116_6
+; GCN3-NEXT: s_cbranch_execnz .LBB116_4
; GCN3-NEXT: .LBB116_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB116_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB116_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_umin_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB116_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB116_2
-; GCN3-NEXT: .LBB116_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB116_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -22137,40 +21381,21 @@ define void @flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB129_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB129_6
+; GCN1-NEXT: s_cbranch_execnz .LBB129_4
; GCN1-NEXT: .LBB129_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB129_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB129_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB129_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB129_2
-; GCN1-NEXT: .LBB129_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB129_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -22198,40 +21423,21 @@ define void @flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB129_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB129_6
+; GCN2-NEXT: s_cbranch_execnz .LBB129_4
; GCN2-NEXT: .LBB129_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB129_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB129_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB129_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB129_2
-; GCN2-NEXT: .LBB129_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB129_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -22257,37 +21463,21 @@ define void @flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB129_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB129_6
+; GCN3-NEXT: s_cbranch_execnz .LBB129_4
; GCN3-NEXT: .LBB129_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB129_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB129_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB129_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB129_2
-; GCN3-NEXT: .LBB129_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB129_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -22320,40 +21510,21 @@ define i64 @flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB130_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB130_6
+; GCN1-NEXT: s_cbranch_execnz .LBB130_4
; GCN1-NEXT: .LBB130_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB130_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB130_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB130_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB130_2
-; GCN1-NEXT: .LBB130_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB130_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -22382,40 +21553,21 @@ define i64 @flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB130_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB130_6
+; GCN2-NEXT: s_cbranch_execnz .LBB130_4
; GCN2-NEXT: .LBB130_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB130_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB130_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB130_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB130_2
-; GCN2-NEXT: .LBB130_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB130_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -22442,37 +21594,21 @@ define i64 @flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB130_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB130_6
+; GCN3-NEXT: s_cbranch_execnz .LBB130_4
; GCN3-NEXT: .LBB130_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB130_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB130_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_smin_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB130_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB130_2
-; GCN3-NEXT: .LBB130_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB130_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -24064,42 +23200,21 @@ define void @flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB139_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB139_6
+; GCN1-NEXT: s_cbranch_execnz .LBB139_4
; GCN1-NEXT: .LBB139_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB139_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB139_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 1, v6
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB139_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB139_2
-; GCN1-NEXT: .LBB139_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB139_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN1-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -24130,42 +23245,21 @@ define void @flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB139_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB139_6
+; GCN2-NEXT: s_cbranch_execnz .LBB139_4
; GCN2-NEXT: .LBB139_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB139_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB139_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 1, v6
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB139_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB139_2
-; GCN2-NEXT: .LBB139_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB139_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN2-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -24194,39 +23288,21 @@ define void @flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB139_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB139_6
+; GCN3-NEXT: s_cbranch_execnz .LBB139_4
; GCN3-NEXT: .LBB139_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB139_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB139_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_add_co_u32_e32 v4, vcc, 1, v6
-; GCN3-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
-; GCN3-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB139_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB139_2
-; GCN3-NEXT: .LBB139_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB139_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -24262,42 +23338,21 @@ define i64 @flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB140_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
+; GCN1-NEXT: ; %bb.1: ; %Flow
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN1-NEXT: s_cbranch_execnz .LBB140_6
+; GCN1-NEXT: s_cbranch_execnz .LBB140_4
; GCN1-NEXT: .LBB140_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB140_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[6:7], 0
-; GCN1-NEXT: .LBB140_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 1, v8
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v9, vcc
-; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[2:3]
-; GCN1-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v6, 0, v0, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_inc_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN1-NEXT: s_cbranch_execnz .LBB140_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN1-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB140_2
-; GCN1-NEXT: .LBB140_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB140_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -24329,42 +23384,21 @@ define i64 @flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB140_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
+; GCN2-NEXT: ; %bb.1: ; %Flow
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN2-NEXT: s_cbranch_execnz .LBB140_6
+; GCN2-NEXT: s_cbranch_execnz .LBB140_4
; GCN2-NEXT: .LBB140_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB140_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[6:7], 0
-; GCN2-NEXT: .LBB140_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 1, v8
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v9, vcc
-; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[2:3]
-; GCN2-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v6, 0, v0, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_inc_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN2-NEXT: s_cbranch_execnz .LBB140_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN2-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB140_2
-; GCN2-NEXT: .LBB140_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB140_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -24394,39 +23428,21 @@ define i64 @flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB140_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
+; GCN3-NEXT: ; %bb.1: ; %Flow
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
-; GCN3-NEXT: s_cbranch_execnz .LBB140_6
+; GCN3-NEXT: s_cbranch_execnz .LBB140_4
; GCN3-NEXT: .LBB140_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB140_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[6:7], 0
-; GCN3-NEXT: .LBB140_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_add_co_u32_e32 v0, vcc, 1, v8
-; GCN3-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v9, vcc
-; GCN3-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[2:3]
-; GCN3-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v6, 0, v0, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_inc_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GCN3-NEXT: s_cbranch_execnz .LBB140_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[6:7]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
; GCN3-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB140_2
-; GCN3-NEXT: .LBB140_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB140_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -26151,46 +25167,23 @@ define void @flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GCN1-NEXT: s_waitcnt lgkmcnt(0)
; GCN1-NEXT: v_cmp_ne_u32_e32 vcc, s4, v1
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN1-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB149_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
-; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
-; GCN1-NEXT: s_cbranch_execnz .LBB149_6
+; GCN1-NEXT: ; %bb.1: ; %Flow
+; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB149_4
; GCN1-NEXT: .LBB149_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[8:9]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB149_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v4, vcc, 4, v0
-; GCN1-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN1-NEXT: flat_load_dword v7, v[4:5]
-; GCN1-NEXT: flat_load_dword v6, v[0:1]
-; GCN1-NEXT: s_mov_b64 s[10:11], 0
-; GCN1-NEXT: .LBB149_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN1-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GCN1-NEXT: v_add_i32_e64 v4, s[6:7], -1, v6
-; GCN1-NEXT: v_addc_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN1-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN1-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN1-NEXT: v_mov_b32_e32 v7, v5
-; GCN1-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN1-NEXT: v_mov_b32_e32 v6, v4
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GCN1-NEXT: s_cbranch_execnz .LBB149_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[10:11]
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
+; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB149_2
-; GCN1-NEXT: .LBB149_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB149_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -26220,46 +25213,23 @@ define void @flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GCN2-NEXT: s_waitcnt lgkmcnt(0)
; GCN2-NEXT: v_cmp_ne_u32_e32 vcc, s4, v1
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN2-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB149_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
-; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
-; GCN2-NEXT: s_cbranch_execnz .LBB149_6
+; GCN2-NEXT: ; %bb.1: ; %Flow
+; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB149_4
; GCN2-NEXT: .LBB149_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[8:9]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB149_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v4, vcc, 4, v0
-; GCN2-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN2-NEXT: flat_load_dword v7, v[4:5]
-; GCN2-NEXT: flat_load_dword v6, v[0:1]
-; GCN2-NEXT: s_mov_b64 s[10:11], 0
-; GCN2-NEXT: .LBB149_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN2-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GCN2-NEXT: v_add_u32_e64 v4, s[6:7], -1, v6
-; GCN2-NEXT: v_addc_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN2-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN2-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN2-NEXT: v_mov_b32_e32 v7, v5
-; GCN2-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN2-NEXT: v_mov_b32_e32 v6, v4
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GCN2-NEXT: s_cbranch_execnz .LBB149_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[10:11]
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
+; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB149_2
-; GCN2-NEXT: .LBB149_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB149_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -26287,43 +25257,23 @@ define void @flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GCN3-NEXT: s_mov_b64 s[4:5], src_private_base
; GCN3-NEXT: v_cmp_ne_u32_e32 vcc, s5, v1
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN3-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB149_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
-; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
-; GCN3-NEXT: s_cbranch_execnz .LBB149_6
+; GCN3-NEXT: ; %bb.1: ; %Flow
+; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GCN3-NEXT: s_cbranch_execnz .LBB149_4
; GCN3-NEXT: .LBB149_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[8:9]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB149_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; GCN3-NEXT: s_mov_b64 s[10:11], 0
-; GCN3-NEXT: .LBB149_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN3-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GCN3-NEXT: v_add_co_u32_e64 v4, s[6:7], -1, v6
-; GCN3-NEXT: v_addc_co_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN3-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; GCN3-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GCN3-NEXT: v_mov_b32_e32 v7, v5
-; GCN3-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN3-NEXT: v_mov_b32_e32 v6, v4
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GCN3-NEXT: s_cbranch_execnz .LBB149_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[10:11]
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
+; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB149_2
-; GCN3-NEXT: .LBB149_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB149_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
@@ -26358,46 +25308,23 @@ define i64 @flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN1-NEXT: v_cmp_ne_u32_e32 vcc, s4, v5
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN1-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN1-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN1-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN1-NEXT: s_cbranch_execnz .LBB150_3
-; GCN1-NEXT: ; %bb.1: ; %Flow3
-; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
-; GCN1-NEXT: s_cbranch_execnz .LBB150_6
+; GCN1-NEXT: ; %bb.1: ; %Flow
+; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB150_4
; GCN1-NEXT: .LBB150_2: ; %atomicrmw.phi
; GCN1-NEXT: s_or_b64 exec, exec, s[8:9]
; GCN1-NEXT: s_setpc_b64 s[30:31]
; GCN1-NEXT: .LBB150_3: ; %atomicrmw.global
-; GCN1-NEXT: v_add_i32_e32 v0, vcc, 4, v4
-; GCN1-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN1-NEXT: flat_load_dword v1, v[0:1]
-; GCN1-NEXT: flat_load_dword v0, v[4:5]
-; GCN1-NEXT: s_mov_b64 s[10:11], 0
-; GCN1-NEXT: .LBB150_4: ; %atomicrmw.start
-; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: v_mov_b32_e32 v9, v1
-; GCN1-NEXT: v_mov_b32_e32 v8, v0
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN1-NEXT: v_cmp_gt_u64_e64 s[4:5], v[8:9], v[2:3]
-; GCN1-NEXT: v_add_i32_e64 v0, s[6:7], -1, v8
-; GCN1-NEXT: v_addc_u32_e64 v1, s[6:7], -1, v9, s[6:7]
-; GCN1-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN1-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
-; GCN1-NEXT: v_cndmask_b32_e32 v6, v0, v2, vcc
-; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN1-NEXT: flat_atomic_dec_x2 v[0:1], v[4:5], v[2:3] glc
; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT: buffer_wbinvl1_vol
-; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN1-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN1-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GCN1-NEXT: s_cbranch_execnz .LBB150_4
-; GCN1-NEXT: ; %bb.5: ; %Flow
-; GCN1-NEXT: s_or_b64 exec, exec, s[10:11]
; GCN1-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN1-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
+; GCN1-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN1-NEXT: s_cbranch_execz .LBB150_2
-; GCN1-NEXT: .LBB150_6: ; %atomicrmw.private
+; GCN1-NEXT: .LBB150_4: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN1-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN1-NEXT: v_add_i32_e32 v5, vcc, 4, v4
@@ -26429,46 +25356,23 @@ define i64 @flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN2-NEXT: v_cmp_ne_u32_e32 vcc, s4, v5
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN2-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN2-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN2-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN2-NEXT: s_cbranch_execnz .LBB150_3
-; GCN2-NEXT: ; %bb.1: ; %Flow3
-; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
-; GCN2-NEXT: s_cbranch_execnz .LBB150_6
+; GCN2-NEXT: ; %bb.1: ; %Flow
+; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB150_4
; GCN2-NEXT: .LBB150_2: ; %atomicrmw.phi
; GCN2-NEXT: s_or_b64 exec, exec, s[8:9]
; GCN2-NEXT: s_setpc_b64 s[30:31]
; GCN2-NEXT: .LBB150_3: ; %atomicrmw.global
-; GCN2-NEXT: v_add_u32_e32 v0, vcc, 4, v4
-; GCN2-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; GCN2-NEXT: flat_load_dword v1, v[0:1]
-; GCN2-NEXT: flat_load_dword v0, v[4:5]
-; GCN2-NEXT: s_mov_b64 s[10:11], 0
-; GCN2-NEXT: .LBB150_4: ; %atomicrmw.start
-; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: v_mov_b32_e32 v9, v1
-; GCN2-NEXT: v_mov_b32_e32 v8, v0
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN2-NEXT: v_cmp_gt_u64_e64 s[4:5], v[8:9], v[2:3]
-; GCN2-NEXT: v_add_u32_e64 v0, s[6:7], -1, v8
-; GCN2-NEXT: v_addc_u32_e64 v1, s[6:7], -1, v9, s[6:7]
-; GCN2-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN2-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
-; GCN2-NEXT: v_cndmask_b32_e32 v6, v0, v2, vcc
-; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN2-NEXT: flat_atomic_dec_x2 v[0:1], v[4:5], v[2:3] glc
; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT: buffer_wbinvl1_vol
-; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN2-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN2-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GCN2-NEXT: s_cbranch_execnz .LBB150_4
-; GCN2-NEXT: ; %bb.5: ; %Flow
-; GCN2-NEXT: s_or_b64 exec, exec, s[10:11]
; GCN2-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN2-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
+; GCN2-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN2-NEXT: s_cbranch_execz .LBB150_2
-; GCN2-NEXT: .LBB150_6: ; %atomicrmw.private
+; GCN2-NEXT: .LBB150_4: ; %atomicrmw.private
; GCN2-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN2-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN2-NEXT: v_add_u32_e32 v5, vcc, 4, v4
@@ -26498,43 +25402,23 @@ define i64 @flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GCN3-NEXT: v_cmp_ne_u32_e32 vcc, s5, v5
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN3-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN3-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN3-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GCN3-NEXT: s_cbranch_execnz .LBB150_3
-; GCN3-NEXT: ; %bb.1: ; %Flow3
-; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
-; GCN3-NEXT: s_cbranch_execnz .LBB150_6
+; GCN3-NEXT: ; %bb.1: ; %Flow
+; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
+; GCN3-NEXT: s_cbranch_execnz .LBB150_4
; GCN3-NEXT: .LBB150_2: ; %atomicrmw.phi
; GCN3-NEXT: s_or_b64 exec, exec, s[8:9]
; GCN3-NEXT: s_setpc_b64 s[30:31]
; GCN3-NEXT: .LBB150_3: ; %atomicrmw.global
-; GCN3-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; GCN3-NEXT: s_mov_b64 s[10:11], 0
-; GCN3-NEXT: .LBB150_4: ; %atomicrmw.start
-; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT: v_mov_b32_e32 v9, v1
-; GCN3-NEXT: v_mov_b32_e32 v8, v0
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN3-NEXT: v_cmp_gt_u64_e64 s[4:5], v[8:9], v[2:3]
-; GCN3-NEXT: v_add_co_u32_e64 v0, s[6:7], -1, v8
-; GCN3-NEXT: v_addc_co_u32_e64 v1, s[6:7], -1, v9, s[6:7]
-; GCN3-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GCN3-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
-; GCN3-NEXT: v_cndmask_b32_e32 v6, v0, v2, vcc
-; GCN3-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GCN3-NEXT: flat_atomic_dec_x2 v[0:1], v[4:5], v[2:3] glc
; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN3-NEXT: buffer_wbinvl1_vol
-; GCN3-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GCN3-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN3-NEXT: s_andn2_b64 exec, exec, s[10:11]
-; GCN3-NEXT: s_cbranch_execnz .LBB150_4
-; GCN3-NEXT: ; %bb.5: ; %Flow
-; GCN3-NEXT: s_or_b64 exec, exec, s[10:11]
; GCN3-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN3-NEXT: ; implicit-def: $vgpr2_vgpr3
-; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[8:9]
+; GCN3-NEXT: s_andn2_saveexec_b64 s[8:9], s[4:5]
; GCN3-NEXT: s_cbranch_execz .LBB150_2
-; GCN3-NEXT: .LBB150_6: ; %atomicrmw.private
+; GCN3-NEXT: .LBB150_4: ; %atomicrmw.private
; GCN3-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
; GCN3-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
; GCN3-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll
index 36c4c381d1b3a..4dea4495b36fb 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system_noprivate.ll
@@ -1825,80 +1825,29 @@ define void @flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GFX7-LABEL: flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_sub_i32_e32 v4, vcc, v6, v2
-; GFX7-NEXT: v_subb_u32_e32 v5, vcc, v7, v3, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB38_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v6, v2
-; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v7, v3, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB38_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB38_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw sub ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -1909,82 +1858,29 @@ define i64 @flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GFX7-LABEL: flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_sub_i32_e32 v6, vcc, v8, v2
-; GFX7-NEXT: v_subb_u32_e32 v7, vcc, v9, v3, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB39_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_sub_u32_e32 v6, vcc, v8, v2
-; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v9, v3, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB39_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB39_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw sub ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -2715,80 +2611,29 @@ define void @flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GFX7-LABEL: flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v5, v7, v3
-; GFX7-NEXT: v_and_b32_e32 v4, v6, v2
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB48_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_and_b32_e32 v5, v7, v3
-; GFX8-NEXT: v_and_b32_e32 v4, v6, v2
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB48_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_and_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_and_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_and_b32_e32 v4, v6, v2
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_and_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB48_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw and ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -2799,82 +2644,29 @@ define i64 @flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GFX7-LABEL: flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_and_b32_e32 v7, v9, v3
-; GFX7-NEXT: v_and_b32_e32 v6, v8, v2
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB49_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_and_b32_e32 v7, v9, v3
-; GFX8-NEXT: v_and_b32_e32 v6, v8, v2
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB49_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_and_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_and_b32_e32 v4, v6, v2
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB49_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw and ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -4555,80 +4347,29 @@ define void @flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GFX7-LABEL: flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB68_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_or_b32_e32 v5, v7, v3
-; GFX7-NEXT: v_or_b32_e32 v4, v6, v2
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB68_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB68_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_or_b32_e32 v5, v7, v3
-; GFX8-NEXT: v_or_b32_e32 v4, v6, v2
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB68_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_or_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB68_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_or_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_or_b32_e32 v4, v6, v2
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_or_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB68_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw or ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -4639,82 +4380,29 @@ define i64 @flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i64
; GFX7-LABEL: flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB69_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_or_b32_e32 v7, v9, v3
-; GFX7-NEXT: v_or_b32_e32 v6, v8, v2
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB69_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB69_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_or_b32_e32 v7, v9, v3
-; GFX8-NEXT: v_or_b32_e32 v6, v8, v2
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB69_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB69_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_or_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_or_b32_e32 v4, v6, v2
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB69_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw or ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -5445,80 +5133,29 @@ define void @flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GFX7-LABEL: flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB78_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_xor_b32_e32 v5, v7, v3
-; GFX7-NEXT: v_xor_b32_e32 v4, v6, v2
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB78_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB78_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_xor_b32_e32 v5, v7, v3
-; GFX8-NEXT: v_xor_b32_e32 v4, v6, v2
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB78_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB78_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_xor_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_xor_b32_e32 v4, v6, v2
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB78_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw xor ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -5529,82 +5166,29 @@ define i64 @flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GFX7-LABEL: flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB79_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_xor_b32_e32 v7, v9, v3
-; GFX7-NEXT: v_xor_b32_e32 v6, v8, v2
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB79_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB79_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_xor_b32_e32 v7, v9, v3
-; GFX8-NEXT: v_xor_b32_e32 v6, v8, v2
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB79_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB79_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_xor_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_xor_b32_e32 v4, v6, v2
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB79_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw xor ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -6821,83 +6405,29 @@ define void @flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GFX7-LABEL: flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB92_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB92_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB92_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB92_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_max_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB92_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB92_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw max ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -6908,85 +6438,29 @@ define i64 @flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GFX7-LABEL: flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB93_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB93_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB93_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB93_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB93_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB93_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw max ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
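
For reference, the four i64 min/max flavors in the surrounding hunks map onto distinct native instructions, split by signedness:

  atomicrmw max  -> flat_atomic_smax_x2  (signed)
  atomicrmw min  -> flat_atomic_smin_x2  (signed)
  atomicrmw umax -> flat_atomic_umax_x2  (unsigned)
  atomicrmw umin -> flat_atomic_umin_x2  (unsigned)

The deleted loops encoded the same signedness in their compares: v_cmp_gt_i64/v_cmp_le_i64 for the signed forms, v_cmp_gt_u64/v_cmp_le_u64 for the unsigned ones.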
@@ -8101,83 +7575,29 @@ define void @flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GFX7-LABEL: flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB105_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB105_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB105_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB105_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB105_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB105_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw umax ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -8188,85 +7608,29 @@ define i64 @flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GFX7-LABEL: flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB106_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB106_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB106_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB106_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB106_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB106_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw umax ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -9045,83 +8409,29 @@ define void @flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr %out
; GFX7-LABEL: flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB115_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB115_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB115_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB115_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB115_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB115_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw umin ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -9132,85 +8442,29 @@ define i64 @flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i
; GFX7-LABEL: flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB116_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB116_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB116_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB116_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB116_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB116_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw umin ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -10420,83 +9674,29 @@ define void @flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr %out,
; GFX7-LABEL: flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB129_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB129_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB129_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB129_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_min_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB129_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB129_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw min ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -10507,85 +9707,29 @@ define i64 @flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory(ptr %out, i6
; GFX7-LABEL: flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB130_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB130_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB130_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB130_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB130_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB130_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw min ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -11388,89 +10532,29 @@ define void @flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GFX7-LABEL: flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB139_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v6
-; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v7, vcc
-; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v5, 0, v1, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB139_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB139_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 1, v6
-; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v7, vcc
-; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, 0, v1, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, 0, v0, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB139_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB139_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 1, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB139_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -11481,91 +10565,29 @@ define i64 @flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GFX7-LABEL: flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: .LBB140_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v8
-; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v9, vcc
-; GFX7-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[2:3]
-; GFX7-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v6, 0, v0, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX7-NEXT: s_cbranch_execnz .LBB140_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: .LBB140_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 1, v8
-; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v9, vcc
-; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[2:3]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, 0, v0, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX8-NEXT: s_cbranch_execnz .LBB140_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB140_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 1, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB140_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw uinc_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
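
The uinc_wrap hunks above and the udec_wrap hunks below delete the longest loops in the file, since the wrap operations are not plain ALU ops. The removed v_cndmask sequences compute the LangRef wrap semantics one iteration at a time; as a scalar reference, each iteration's update value is equivalent to:

; Scalar equivalents of the wrap updates, per the LLVM LangRef; the
; removed cmpswap loops computed exactly these values on each pass.
define i64 @uinc_wrap_step(i64 %old, i64 %in) {
  %wrap = icmp uge i64 %old, %in   ; old >= in  =>  wrap to 0
  %inc  = add i64 %old, 1
  %new  = select i1 %wrap, i64 0, i64 %inc
  ret i64 %new
}

define i64 @udec_wrap_step(i64 %old, i64 %in) {
  %is0  = icmp eq i64 %old, 0
  %oob  = icmp ugt i64 %old, %in
  %wrap = or i1 %is0, %oob         ; old == 0 || old > in  =>  reset to in
  %dec  = sub i64 %old, 1
  %new  = select i1 %wrap, i64 %in, i64 %dec
  ret i64 %new
}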
@@ -12440,95 +11462,29 @@ define void @flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr
; GFX7-LABEL: flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v7, v[0:1]
-; GFX7-NEXT: flat_load_dword v6, v[8:9]
-; GFX7-NEXT: s_mov_b64 s[8:9], 0
-; GFX7-NEXT: .LBB149_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX7-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GFX7-NEXT: v_add_i32_e64 v0, s[6:7], -1, v6
-; GFX7-NEXT: v_addc_u32_e64 v1, s[6:7], -1, v7, s[6:7]
-; GFX7-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v1, v3, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v0, v2, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX7-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX7-NEXT: s_cbranch_execnz .LBB149_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v7, v[0:1]
-; GFX8-NEXT: flat_load_dword v6, v[8:9]
-; GFX8-NEXT: s_mov_b64 s[8:9], 0
-; GFX8-NEXT: .LBB149_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX8-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GFX8-NEXT: v_add_u32_e64 v0, s[6:7], -1, v6
-; GFX8-NEXT: v_addc_u32_e64 v1, s[6:7], -1, v7, s[6:7]
-; GFX8-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v1, v3, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v0, v2, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[8:9], v[4:7] glc
+; GFX8-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v7, v1
-; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX8-NEXT: v_mov_b32_e32 v6, v0
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX8-NEXT: s_cbranch_execnz .LBB149_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[6:7], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[8:9], 0
-; GFX9-NEXT: .LBB149_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GFX9-NEXT: v_add_co_u32_e64 v4, s[6:7], -1, v6
-; GFX9-NEXT: v_addc_co_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX9-NEXT: s_cbranch_execnz .LBB149_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%tmp0 = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
@@ -12539,97 +11495,29 @@ define i64 @flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr %o
; GFX7-LABEL: flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v4, vcc, 32, v0
-; GFX7-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 36, v0
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX7-NEXT: flat_load_dword v1, v[0:1]
-; GFX7-NEXT: flat_load_dword v0, v[4:5]
-; GFX7-NEXT: s_mov_b64 s[8:9], 0
-; GFX7-NEXT: .LBB150_1: ; %atomicrmw.start
-; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v9, v1
-; GFX7-NEXT: v_mov_b32_e32 v8, v0
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GFX7-NEXT: v_cmp_gt_u64_e64 s[4:5], v[8:9], v[2:3]
-; GFX7-NEXT: v_add_i32_e64 v0, s[6:7], -1, v8
-; GFX7-NEXT: v_addc_u32_e64 v1, s[6:7], -1, v9, s[6:7]
-; GFX7-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v0, v2, vcc
-; GFX7-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX7-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1_vol
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX7-NEXT: s_cbranch_execnz .LBB150_1
-; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX7-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 36, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dword v1, v[0:1]
-; GFX8-NEXT: flat_load_dword v0, v[4:5]
-; GFX8-NEXT: s_mov_b64 s[8:9], 0
-; GFX8-NEXT: .LBB150_1: ; %atomicrmw.start
-; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GFX8-NEXT: v_cmp_gt_u64_e64 s[4:5], v[8:9], v[2:3]
-; GFX8-NEXT: v_add_u32_e64 v0, s[6:7], -1, v8
-; GFX8-NEXT: v_addc_u32_e64 v1, s[6:7], -1, v9, s[6:7]
-; GFX8-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v0, v2, vcc
-; GFX8-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; GFX8-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; GFX8-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX8-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX8-NEXT: s_cbranch_execnz .LBB150_1
-; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX8-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: flat_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: flat_load_dwordx2 v[4:5], v[0:1] offset:32
-; GFX9-NEXT: s_mov_b64 s[8:9], 0
-; GFX9-NEXT: .LBB150_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GFX9-NEXT: v_add_co_u32_e64 v4, s[6:7], -1, v6
-; GFX9-NEXT: v_addc_co_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; GFX9-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] offset:32 glc
+; GFX9-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX9-NEXT: s_cbranch_execnz .LBB150_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr %out, i64 4
%result = atomicrmw udec_wrap ptr %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0, !noalias.addrspace !1
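
The global-saddr-atomics.ll hunks that follow are mechanical: each agent-scope sub/and test (and the or tests after them) gains !amdgpu.no.fine.grained.memory so that, under the new expansion rules, these operations keep selecting the native global atomics the file exists to check. A sketch of the updated shape, assuming !0 is declared as an empty node at the end of the file (the declaration is outside the visible hunks):

; Sketch of the updated test shape, not copied from the patch.
define amdgpu_ps void @sub_agent_no_fine_grained(ptr addrspace(1) %p, i32 %v) {
  ; The metadata keeps this on the direct-selection path under the new
  ; expansion rules, so the test still checks global_atomic_sub.
  %unused = atomicrmw sub ptr addrspace(1) %p, i32 %v syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
  ret void
}

!0 = !{} ; assumed: empty marker node, matching the tests' convention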
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll
index 1def479be283a..d297955f109ab 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll
@@ -985,7 +985,7 @@ define amdgpu_ps float @global_sub_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw sub ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw sub ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1024,7 +1024,7 @@ define amdgpu_ps float @global_sub_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw sub ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw sub ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1061,7 +1061,7 @@ define amdgpu_ps void @global_sub_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw sub ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw sub ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1098,7 +1098,7 @@ define amdgpu_ps void @global_sub_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw sub ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw sub ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1135,7 +1135,7 @@ define amdgpu_ps <2 x float> @global_sub_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw sub ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw sub ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -1174,7 +1174,7 @@ define amdgpu_ps <2 x float> @global_sub_saddr_i64_rtn_neg128(ptr addrspace(1) i
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw sub ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw sub ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -1211,7 +1211,7 @@ define amdgpu_ps void @global_sub_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw sub ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw sub ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1248,7 +1248,7 @@ define amdgpu_ps void @global_sub_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw sub ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw sub ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1289,7 +1289,7 @@ define amdgpu_ps float @global_and_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw and ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw and ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1328,7 +1328,7 @@ define amdgpu_ps float @global_and_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw and ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw and ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1365,7 +1365,7 @@ define amdgpu_ps void @global_and_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw and ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw and ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1402,7 +1402,7 @@ define amdgpu_ps void @global_and_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw and ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw and ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1439,7 +1439,7 @@ define amdgpu_ps <2 x float> @global_and_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw and ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw and ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -1478,7 +1478,7 @@ define amdgpu_ps <2 x float> @global_and_saddr_i64_rtn_neg128(ptr addrspace(1) i
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw and ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw and ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -1515,7 +1515,7 @@ define amdgpu_ps void @global_and_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw and ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw and ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1552,7 +1552,7 @@ define amdgpu_ps void @global_and_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw and ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw and ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1593,7 +1593,7 @@ define amdgpu_ps float @global_or_saddr_i32_rtn(ptr addrspace(1) inreg %sbase, i
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw or ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw or ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1632,7 +1632,7 @@ define amdgpu_ps float @global_or_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %s
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw or ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw or ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1669,7 +1669,7 @@ define amdgpu_ps void @global_or_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw or ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw or ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1706,7 +1706,7 @@ define amdgpu_ps void @global_or_saddr_i32_nortn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw or ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw or ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1743,7 +1743,7 @@ define amdgpu_ps <2 x float> @global_or_saddr_i64_rtn(ptr addrspace(1) inreg %sb
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw or ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw or ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -1782,7 +1782,7 @@ define amdgpu_ps <2 x float> @global_or_saddr_i64_rtn_neg128(ptr addrspace(1) in
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw or ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw or ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -1819,7 +1819,7 @@ define amdgpu_ps void @global_or_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw or ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw or ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1856,7 +1856,7 @@ define amdgpu_ps void @global_or_saddr_i64_nortn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw or ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw or ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -1897,7 +1897,7 @@ define amdgpu_ps float @global_xor_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw xor ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw xor ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1936,7 +1936,7 @@ define amdgpu_ps float @global_xor_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw xor ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw xor ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -1973,7 +1973,7 @@ define amdgpu_ps void @global_xor_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw xor ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw xor ptr addrspace(1) %gep0, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2010,7 +2010,7 @@ define amdgpu_ps void @global_xor_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw xor ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst
+ %unused = atomicrmw xor ptr addrspace(1) %gep1, i32 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2047,7 +2047,7 @@ define amdgpu_ps <2 x float> @global_xor_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw xor ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw xor ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2086,7 +2086,7 @@ define amdgpu_ps <2 x float> @global_xor_saddr_i64_rtn_neg128(ptr addrspace(1) i
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw xor ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %rtn = atomicrmw xor ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2123,7 +2123,7 @@ define amdgpu_ps void @global_xor_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw xor ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw xor ptr addrspace(1) %gep0, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2160,7 +2160,7 @@ define amdgpu_ps void @global_xor_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw xor ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst
+ %unused = atomicrmw xor ptr addrspace(1) %gep1, i64 %data syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2198,7 +2198,7 @@ define amdgpu_ps float @global_max_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw max ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw max ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -2234,7 +2234,7 @@ define amdgpu_ps float @global_max_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw max ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw max ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -2267,7 +2267,7 @@ define amdgpu_ps void @global_max_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw max ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw max ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2300,7 +2300,7 @@ define amdgpu_ps void @global_max_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw max ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw max ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2334,7 +2334,7 @@ define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw max ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw max ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2370,7 +2370,7 @@ define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn_neg128(ptr addrspace(1) i
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw max ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw max ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2403,7 +2403,7 @@ define amdgpu_ps void @global_max_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw max ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw max ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2436,7 +2436,7 @@ define amdgpu_ps void @global_max_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw max ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw max ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2474,7 +2474,7 @@ define amdgpu_ps float @global_min_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw min ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw min ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -2510,7 +2510,7 @@ define amdgpu_ps float @global_min_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw min ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw min ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -2543,7 +2543,7 @@ define amdgpu_ps void @global_min_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw min ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw min ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2576,7 +2576,7 @@ define amdgpu_ps void @global_min_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw min ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw min ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2610,7 +2610,7 @@ define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw min ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw min ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2646,7 +2646,7 @@ define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn_neg128(ptr addrspace(1) i
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw min ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw min ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2679,7 +2679,7 @@ define amdgpu_ps void @global_min_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw min ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw min ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2712,7 +2712,7 @@ define amdgpu_ps void @global_min_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw min ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw min ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2750,7 +2750,7 @@ define amdgpu_ps float @global_umax_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw umax ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umax ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -2786,7 +2786,7 @@ define amdgpu_ps float @global_umax_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw umax ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umax ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -2819,7 +2819,7 @@ define amdgpu_ps void @global_umax_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw umax ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umax ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2852,7 +2852,7 @@ define amdgpu_ps void @global_umax_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw umax ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umax ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2886,7 +2886,7 @@ define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn(ptr addrspace(1) inreg %
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw umax ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umax ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2922,7 +2922,7 @@ define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn_neg128(ptr addrspace(1)
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw umax ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umax ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -2955,7 +2955,7 @@ define amdgpu_ps void @global_umax_saddr_i64_nortn(ptr addrspace(1) inreg %sbase
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw umax ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umax ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -2988,7 +2988,7 @@ define amdgpu_ps void @global_umax_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw umax ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umax ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3026,7 +3026,7 @@ define amdgpu_ps float @global_umin_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw umin ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umin ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -3062,7 +3062,7 @@ define amdgpu_ps float @global_umin_saddr_i32_rtn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw umin ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umin ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -3095,7 +3095,7 @@ define amdgpu_ps void @global_umin_saddr_i32_nortn(ptr addrspace(1) inreg %sbase
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw umin ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umin ptr addrspace(1) %gep0, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3128,7 +3128,7 @@ define amdgpu_ps void @global_umin_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw umin ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umin ptr addrspace(1) %gep1, i32 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3162,7 +3162,7 @@ define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn(ptr addrspace(1) inreg %
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw umin ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umin ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -3198,7 +3198,7 @@ define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn_neg128(ptr addrspace(1)
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw umin ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %rtn = atomicrmw umin ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -3231,7 +3231,7 @@ define amdgpu_ps void @global_umin_saddr_i64_nortn(ptr addrspace(1) inreg %sbase
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw umin ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umin ptr addrspace(1) %gep0, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3264,7 +3264,7 @@ define amdgpu_ps void @global_umin_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw umin ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst
+ %unused = atomicrmw umin ptr addrspace(1) %gep1, i64 %data syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3664,7 +3664,7 @@ define amdgpu_ps float @global_inc_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic
+ %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -3690,7 +3690,7 @@ define amdgpu_ps float @global_inc_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic
+ %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -3712,7 +3712,7 @@ define amdgpu_ps void @global_inc_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic
+ %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3734,7 +3734,7 @@ define amdgpu_ps void @global_inc_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic
+ %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3758,7 +3758,7 @@ define amdgpu_ps <2 x float> @global_inc_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic
+ %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -3784,7 +3784,7 @@ define amdgpu_ps <2 x float> @global_inc_saddr_i64_rtn_neg128(ptr addrspace(1) i
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic
+ %rtn = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -3806,7 +3806,7 @@ define amdgpu_ps void @global_inc_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic
+ %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3828,7 +3828,7 @@ define amdgpu_ps void @global_inc_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic
+ %unused = atomicrmw uinc_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3857,7 +3857,7 @@ define amdgpu_ps float @global_dec_saddr_i32_rtn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic
+ %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -3883,7 +3883,7 @@ define amdgpu_ps float @global_dec_saddr_i32_rtn_neg128(ptr addrspace(1) inreg %
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic
+ %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i32 %rtn to float
ret float %cast.rtn
}
@@ -3905,7 +3905,7 @@ define amdgpu_ps void @global_dec_saddr_i32_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw udec_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic
+ %unused = atomicrmw udec_wrap ptr addrspace(1) %gep0, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3927,7 +3927,7 @@ define amdgpu_ps void @global_dec_saddr_i32_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw udec_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic
+ %unused = atomicrmw udec_wrap ptr addrspace(1) %gep1, i32 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -3951,7 +3951,7 @@ define amdgpu_ps <2 x float> @global_dec_saddr_i64_rtn(ptr addrspace(1) inreg %s
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic
+ %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -3977,7 +3977,7 @@ define amdgpu_ps <2 x float> @global_dec_saddr_i64_rtn_neg128(ptr addrspace(1) i
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic
+ %rtn = atomicrmw udec_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
%cast.rtn = bitcast i64 %rtn to <2 x float>
ret <2 x float> %cast.rtn
}
@@ -3999,7 +3999,7 @@ define amdgpu_ps void @global_dec_saddr_i64_nortn(ptr addrspace(1) inreg %sbase,
; GFX12-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
- %unused = atomicrmw udec_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic
+ %unused = atomicrmw udec_wrap ptr addrspace(1) %gep0, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
@@ -4021,8 +4021,10 @@ define amdgpu_ps void @global_dec_saddr_i64_nortn_neg128(ptr addrspace(1) inreg
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 -128
- %unused = atomicrmw udec_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic
+ %unused = atomicrmw udec_wrap ptr addrspace(1) %gep1, i64 %data syncscope("agent") monotonic, !amdgpu.no.fine.grained.memory !0
ret void
}
attributes #0 = { argmemonly nounwind willreturn }
+
+!0 = !{}
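
For reference, a minimal standalone sketch (not part of the patch) of the annotated form these tests exercise: an integer atomicrmw carrying !amdgpu.no.fine.grained.memory asserts the address does not refer to fine-grained host memory, and !amdgpu.no.remote.memory likewise rules out remote memory, so with either annotation the backend is expected to keep the native global atomic instruction rather than expanding the operation to a cmpxchg loop. The function below is illustrative only; the name and the choice of sub/agent/seq_cst are assumptions, not taken from any test above.

; Hypothetical module, annotated the same way as the tests in this patch.
define i32 @sub_agent_no_fine_grained(ptr addrspace(1) %ptr, i32 %val) {
  ; With the metadata present, this should select a native global atomic sub.
  %old = atomicrmw sub ptr addrspace(1) %ptr, i32 %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
  ret i32 %old
}

!0 = !{}
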
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics.ll b/llvm/test/CodeGen/AMDGPU/global_atomics.ll
index 3e15b135eeab9..4650bb2e325c9 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics.ll
@@ -640,7 +640,7 @@ define amdgpu_kernel void @atomic_and_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -695,7 +695,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -755,7 +755,7 @@ define amdgpu_kernel void @atomic_and_i32_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -825,7 +825,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -869,7 +869,7 @@ define amdgpu_kernel void @atomic_and_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile and ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -923,7 +923,7 @@ define amdgpu_kernel void @atomic_and_i32_ret(ptr addrspace(1) %out, ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile and ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -980,7 +980,7 @@ define amdgpu_kernel void @atomic_and_i32_addr64(ptr addrspace(1) %out, i32 %in,
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile and ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1047,7 +1047,7 @@ define amdgpu_kernel void @atomic_and_i32_ret_addr64(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile and ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile and ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1092,7 +1092,7 @@ define amdgpu_kernel void @atomic_sub_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1147,7 +1147,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1207,7 +1207,7 @@ define amdgpu_kernel void @atomic_sub_i32_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1277,7 +1277,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1321,7 +1321,7 @@ define amdgpu_kernel void @atomic_sub_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile sub ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1375,7 +1375,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret(ptr addrspace(1) %out, ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile sub ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1432,7 +1432,7 @@ define amdgpu_kernel void @atomic_sub_i32_addr64(ptr addrspace(1) %out, i32 %in,
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile sub ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1499,7 +1499,7 @@ define amdgpu_kernel void @atomic_sub_i32_ret_addr64(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile sub ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile sub ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1544,7 +1544,7 @@ define amdgpu_kernel void @atomic_max_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1596,7 +1596,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1650,7 +1650,7 @@ define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1717,7 +1717,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1755,7 +1755,7 @@ define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1806,7 +1806,7 @@ define amdgpu_kernel void @atomic_max_i32_ret(ptr addrspace(1) %out, ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1857,7 +1857,7 @@ define amdgpu_kernel void @atomic_max_i32_addr64(ptr addrspace(1) %out, i32 %in,
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1921,7 +1921,7 @@ define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -1960,7 +1960,7 @@ define amdgpu_kernel void @atomic_umax_i32_offset(ptr addrspace(1) %out, i32 %in
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2012,7 +2012,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2066,7 +2066,7 @@ define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr addrspace(1) %out,
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2133,7 +2133,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr addrspace(1) %o
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2171,7 +2171,7 @@ define amdgpu_kernel void @atomic_umax_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2222,7 +2222,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret(ptr addrspace(1) %out, ptr addrsp
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2273,7 +2273,7 @@ define amdgpu_kernel void @atomic_umax_i32_addr64(ptr addrspace(1) %out, i32 %in
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2337,7 +2337,7 @@ define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2376,7 +2376,7 @@ define amdgpu_kernel void @atomic_min_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2428,7 +2428,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2482,7 +2482,7 @@ define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2549,7 +2549,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2587,7 +2587,7 @@ define amdgpu_kernel void @atomic_min_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2638,7 +2638,7 @@ define amdgpu_kernel void @atomic_min_i32_ret(ptr addrspace(1) %out, ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2689,7 +2689,7 @@ define amdgpu_kernel void @atomic_min_i32_addr64(ptr addrspace(1) %out, i32 %in,
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2753,7 +2753,7 @@ define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2792,7 +2792,7 @@ define amdgpu_kernel void @atomic_umin_i32_offset(ptr addrspace(1) %out, i32 %in
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2844,7 +2844,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -2898,7 +2898,7 @@ define amdgpu_kernel void @atomic_umin_i32_addr64_offset(ptr addrspace(1) %out,
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2965,7 +2965,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(ptr addrspace(1) %o
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -3003,7 +3003,7 @@ define amdgpu_kernel void @atomic_umin_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3054,7 +3054,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret(ptr addrspace(1) %out, ptr addrsp
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -3105,7 +3105,7 @@ define amdgpu_kernel void @atomic_umin_i32_addr64(ptr addrspace(1) %out, i32 %in
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3169,7 +3169,7 @@ define amdgpu_kernel void @atomic_umin_i32_ret_addr64(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
+ %val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -3214,7 +3214,7 @@ define amdgpu_kernel void @atomic_or_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3269,7 +3269,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_offset(ptr addrspace(1) %out, ptr a
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -3329,7 +3329,7 @@ define amdgpu_kernel void @atomic_or_i32_addr64_offset(ptr addrspace(1) %out, i3
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3399,7 +3399,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(ptr addrspace(1) %out
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -3443,7 +3443,7 @@ define amdgpu_kernel void @atomic_or_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile or ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3497,7 +3497,7 @@ define amdgpu_kernel void @atomic_or_i32_ret(ptr addrspace(1) %out, ptr addrspac
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile or ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -3554,7 +3554,7 @@ define amdgpu_kernel void @atomic_or_i32_addr64(ptr addrspace(1) %out, i32 %in,
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile or ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3621,7 +3621,7 @@ define amdgpu_kernel void @atomic_or_i32_ret_addr64(ptr addrspace(1) %out, ptr a
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile or ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile or ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -3666,7 +3666,7 @@ define amdgpu_kernel void @atomic_xchg_i32_offset(ptr addrspace(1) %out, i32 %in
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile xchg ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xchg ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4656,7 +4656,7 @@ define amdgpu_kernel void @atomic_xor_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4711,7 +4711,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -4771,7 +4771,7 @@ define amdgpu_kernel void @atomic_xor_i32_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4841,7 +4841,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -4885,7 +4885,7 @@ define amdgpu_kernel void @atomic_xor_i32(ptr addrspace(1) %out, i32 %in) {
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile xor ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4939,7 +4939,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret(ptr addrspace(1) %out, ptr addrspa
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
- %val = atomicrmw volatile xor ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -4996,7 +4996,7 @@ define amdgpu_kernel void @atomic_xor_i32_addr64(ptr addrspace(1) %out, i32 %in,
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile xor ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -5063,7 +5063,7 @@ define amdgpu_kernel void @atomic_xor_i32_ret_addr64(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
- %val = atomicrmw volatile xor ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile xor ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -6331,7 +6331,7 @@ define amdgpu_kernel void @atomic_inc_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6379,7 +6379,7 @@ define amdgpu_kernel void @atomic_inc_i32_max_neg_offset(ptr addrspace(1) %out,
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 -1024
- %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6425,7 +6425,7 @@ define amdgpu_kernel void @atomic_inc_i32_soffset(ptr addrspace(1) %out, i32 %in
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 9000
- %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6475,7 +6475,7 @@ define amdgpu_kernel void @atomic_inc_i32_huge_offset(ptr addrspace(1) %out, i32
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 47224239175595
- %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6530,7 +6530,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -6590,7 +6590,7 @@ define amdgpu_kernel void @atomic_inc_i32_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6660,7 +6660,7 @@ define amdgpu_kernel void @atomic_inc_i32_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -6705,7 +6705,7 @@ define amdgpu_kernel void @atomic_dec_i32_offset(ptr addrspace(1) %out, i32 %in)
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6753,7 +6753,7 @@ define amdgpu_kernel void @atomic_dec_i32_max_neg_offset(ptr addrspace(1) %out,
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 -1024
- %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6799,7 +6799,7 @@ define amdgpu_kernel void @atomic_dec_i32_soffset(ptr addrspace(1) %out, i32 %in
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 9000
- %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6849,7 +6849,7 @@ define amdgpu_kernel void @atomic_dec_i32_huge_offset(ptr addrspace(1) %out, i32
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 47224239175595
- %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -6904,7 +6904,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret_offset(ptr addrspace(1) %out, ptr
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
- %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -6964,7 +6964,7 @@ define amdgpu_kernel void @atomic_dec_i32_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -7034,7 +7034,7 @@ define amdgpu_kernel void @atomic_dec_i32_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
- %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
+ %val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i32 %val, ptr addrspace(1) %out2
ret void
}
@@ -7242,3 +7242,243 @@ define amdgpu_kernel void @atomic_load_bf16_negoffset(ptr addrspace(1) %in, ptr
store bfloat %val, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_kernel void @atomic_sub_i16_soffset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i16 %in) {
+; SI-LABEL: atomic_sub_i16_soffset__amdgpu_no_remote_memory:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT: s_load_dword s2, s[4:5], 0xb
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_u32 s6, s0, 0x4650
+; SI-NEXT: s_addc_u32 s1, s1, 0
+; SI-NEXT: s_and_b32 s0, s6, -4
+; SI-NEXT: s_and_b32 s6, s6, 3
+; SI-NEXT: s_and_b32 s2, s2, 0xffff
+; SI-NEXT: s_load_dword s9, s[0:1], 0x0
+; SI-NEXT: s_lshl_b32 s7, s6, 3
+; SI-NEXT: s_lshl_b32 s6, 0xffff, s7
+; SI-NEXT: s_lshl_b32 s7, s2, s7
+; SI-NEXT: s_not_b32 s8, s6
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s9
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB136_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, s7, v1
+; SI-NEXT: v_and_b32_e32 v0, s6, v0
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v2, s8, v1
+; SI-NEXT: v_or_b32_e32 v0, v2, v0
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB136_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_sub_i16_soffset__amdgpu_no_remote_memory:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_u32 s3, s0, 0x4650
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_and_b32 s0, s3, -4
+; VI-NEXT: s_load_dword s9, s[0:1], 0x0
+; VI-NEXT: s_and_b32 s3, s3, 3
+; VI-NEXT: s_lshl_b32 s3, s3, 3
+; VI-NEXT: s_lshl_b32 s6, 0xffff, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffff
+; VI-NEXT: s_not_b32 s7, s6
+; VI-NEXT: s_lshl_b32 s8, s2, s3
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB136_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, s8, v1
+; VI-NEXT: v_and_b32_e32 v2, s7, v1
+; VI-NEXT: v_and_b32_e32 v0, s6, v0
+; VI-NEXT: v_or_b32_e32 v0, v2, v0
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB136_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_sub_i16_soffset__amdgpu_no_remote_memory:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT: s_load_dword s2, s[4:5], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_add_u32 s3, s0, 0x4650
+; GFX9-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-NEXT: s_and_b32 s0, s3, -4
+; GFX9-NEXT: s_load_dword s7, s[0:1], 0x0
+; GFX9-NEXT: s_and_b32 s3, s3, 3
+; GFX9-NEXT: s_lshl_b32 s3, s3, 3
+; GFX9-NEXT: s_lshl_b32 s4, 0xffff, s3
+; GFX9-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX9-NEXT: s_not_b32 s5, s4
+; GFX9-NEXT: s_lshl_b32 s6, s2, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: .LBB136_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB136_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+ %gep = getelementptr i16, ptr addrspace(1) %out, i64 9000
+ %val = atomicrmw sub ptr addrspace(1) %gep, i16 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+define amdgpu_kernel void @atomic_sub_i8_soffset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i8 %in) {
+; SI-LABEL: atomic_sub_i8_soffset__amdgpu_no_remote_memory:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT: s_load_dword s2, s[4:5], 0xb
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_u32 s6, s0, 0x2328
+; SI-NEXT: s_addc_u32 s1, s1, 0
+; SI-NEXT: s_and_b32 s0, s6, -4
+; SI-NEXT: s_and_b32 s6, s6, 3
+; SI-NEXT: s_and_b32 s2, s2, 0xff
+; SI-NEXT: s_load_dword s9, s[0:1], 0x0
+; SI-NEXT: s_lshl_b32 s7, s6, 3
+; SI-NEXT: s_lshl_b32 s6, 0xff, s7
+; SI-NEXT: s_lshl_b32 s7, s2, s7
+; SI-NEXT: s_not_b32 s8, s6
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s9
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB137_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, s7, v1
+; SI-NEXT: v_and_b32_e32 v0, s6, v0
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v2, s8, v1
+; SI-NEXT: v_or_b32_e32 v0, v2, v0
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB137_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_sub_i8_soffset__amdgpu_no_remote_memory:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_u32 s3, s0, 0x2328
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_and_b32 s0, s3, -4
+; VI-NEXT: s_load_dword s9, s[0:1], 0x0
+; VI-NEXT: s_and_b32 s3, s3, 3
+; VI-NEXT: s_lshl_b32 s3, s3, 3
+; VI-NEXT: s_lshl_b32 s6, 0xff, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xff
+; VI-NEXT: s_not_b32 s7, s6
+; VI-NEXT: s_lshl_b32 s8, s2, s3
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB137_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, s8, v1
+; VI-NEXT: v_and_b32_e32 v2, s7, v1
+; VI-NEXT: v_and_b32_e32 v0, s6, v0
+; VI-NEXT: v_or_b32_e32 v0, v2, v0
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB137_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_sub_i8_soffset__amdgpu_no_remote_memory:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT: s_load_dword s2, s[4:5], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_add_u32 s3, s0, 0x2328
+; GFX9-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-NEXT: s_and_b32 s0, s3, -4
+; GFX9-NEXT: s_load_dword s7, s[0:1], 0x0
+; GFX9-NEXT: s_and_b32 s3, s3, 3
+; GFX9-NEXT: s_lshl_b32 s3, s3, 3
+; GFX9-NEXT: s_lshl_b32 s4, 0xff, s3
+; GFX9-NEXT: s_and_b32 s2, s2, 0xff
+; GFX9-NEXT: s_not_b32 s5, s4
+; GFX9-NEXT: s_lshl_b32 s6, s2, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: .LBB137_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_subrev_u32_e32 v0, s6, v1
+; GFX9-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB137_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+ %gep = getelementptr i8, ptr addrspace(1) %out, i64 9000
+ %val = atomicrmw sub ptr addrspace(1) %gep, i8 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret void
+}
+
+!0 = !{}
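For readers skimming the generated checks above: the shape of the annotation being tested is a bare (empty) metadata node attached directly to the atomicrmw instruction. Below is a minimal standalone IR sketch, separate from the patch itself, with a hypothetical function name and operands; it shows the form that lets shouldExpandAtomicRMWInIR pick the native global atomic instead of the atomicrmw.start cmpswap loop.

define i32 @no_remote_memory_example(ptr addrspace(1) %p, i32 %v) {
  ; The empty !amdgpu.no.remote.memory node asserts the address does not
  ; resolve to remote (e.g. peer/host over PCIe) memory, so the backend may
  ; select the hardware atomic rather than expanding to a compare-exchange loop.
  %old = atomicrmw or ptr addrspace(1) %p, i32 %v syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %old
}

!0 = !{}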
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
index 24ca27c7b3551..704f57028188a 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i32_system.ll
@@ -1992,25 +1992,9 @@ define void @global_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB39_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_sub_i32_e32 v3, vcc, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB39_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2019,43 +2003,17 @@ define void @global_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB39_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_sub_u32_e32 v3, vcc, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_sub v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB39_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_sub v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB39_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -2070,72 +2028,29 @@ define i32 @global_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB40_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_sub_i32_e32 v4, vcc, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB40_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB40_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_sub_u32_e32 v0, vcc, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_sub v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB40_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB40_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_sub v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB40_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -2825,25 +2740,9 @@ define void @global_atomic_and_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB49_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v3, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB49_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2852,43 +2751,17 @@ define void @global_atomic_and_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB49_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_and_b32_e32 v3, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_and v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB49_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_and v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB49_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -2903,72 +2776,29 @@ define i32 @global_atomic_and_i32_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB50_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v4, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB50_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB50_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_and_b32_e32 v0, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_and v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB50_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB50_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_and v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB50_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -4558,25 +4388,9 @@ define void @global_atomic_or_i32_noret_offset__amdgpu_no_remote_memory(ptr addr
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB70_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v3, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB70_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -4585,43 +4399,17 @@ define void @global_atomic_or_i32_noret_offset__amdgpu_no_remote_memory(ptr addr
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB70_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_e32 v3, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_or v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB70_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB70_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_or v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB70_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -4636,72 +4424,29 @@ define i32 @global_atomic_or_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspa
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB71_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_or_b32_e32 v4, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB71_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB71_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_or_b32_e32 v0, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_or v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB71_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB71_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_or v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB71_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -5428,25 +5173,9 @@ define void @global_atomic_xor_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB81_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_xor_b32_e32 v3, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB81_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -5455,43 +5184,17 @@ define void @global_atomic_xor_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB81_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_xor_b32_e32 v3, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_xor v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB81_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB81_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_xor v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB81_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -5506,72 +5209,29 @@ define i32 @global_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB82_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_xor_b32_e32 v4, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB82_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB82_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_xor_b32_e32 v0, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_xor v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB82_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB82_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_xor v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB82_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -6681,25 +6341,9 @@ define void @global_atomic_max_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB95_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_max_i32_e32 v3, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB95_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -6708,43 +6352,17 @@ define void @global_atomic_max_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB95_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_max_i32_e32 v3, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_smax v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB95_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB95_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_smax v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB95_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -6759,72 +6377,29 @@ define i32 @global_atomic_max_i32_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB96_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_max_i32_e32 v4, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB96_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB96_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_max_i32_e32 v0, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB96_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB96_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_smax v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB96_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -7839,25 +7414,9 @@ define void @global_atomic_umax_i32_noret_offset__amdgpu_no_remote_memory(ptr ad
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB108_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_max_u32_e32 v3, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB108_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -7866,43 +7425,17 @@ define void @global_atomic_umax_i32_noret_offset__amdgpu_no_remote_memory(ptr ad
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB108_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_max_u32_e32 v3, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_umax v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB108_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB108_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_umax v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB108_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -7917,72 +7450,29 @@ define i32 @global_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB109_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_max_u32_e32 v4, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB109_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB109_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_max_u32_e32 v0, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB109_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB109_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_umax v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB109_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -8672,25 +8162,9 @@ define void @global_atomic_umin_i32_noret_offset__amdgpu_no_remote_memory(ptr ad
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB118_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_min_u32_e32 v3, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB118_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -8699,43 +8173,17 @@ define void @global_atomic_umin_i32_noret_offset__amdgpu_no_remote_memory(ptr ad
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB118_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_min_u32_e32 v3, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_umin v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB118_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB118_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_umin v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB118_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -8750,72 +8198,29 @@ define i32 @global_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB119_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_min_u32_e32 v4, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB119_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB119_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_min_u32_e32 v0, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB119_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB119_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_umin v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB119_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -9912,25 +9317,9 @@ define void @global_atomic_min_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB132_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_min_i32_e32 v3, v4, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB132_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -9939,43 +9328,17 @@ define void @global_atomic_min_i32_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB132_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_min_i32_e32 v3, v4, v2
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_smin v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB132_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB132_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_smin v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB132_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -9990,72 +9353,29 @@ define i32 @global_atomic_min_i32_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB133_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_min_i32_e32 v4, v5, v2
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB133_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB133_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_min_i32_e32 v0, v1, v2
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB133_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB133_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_smin v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB133_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -10793,27 +10113,9 @@ define void @global_atomic_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory(p
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB142_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v4
-; SI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; SI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_inc v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB142_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -10822,47 +10124,17 @@ define void @global_atomic_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory(p
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB142_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 1, v4
-; VI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; VI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_inc v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB142_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB142_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_inc v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB142_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -10877,78 +10149,29 @@ define i32 @global_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB143_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v5
-; SI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
-; SI-NEXT: v_cndmask_b32_e32 v4, 0, v3, vcc
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
+; SI-NEXT: buffer_atomic_inc v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB143_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB143_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v1
-; VI-NEXT: v_cmp_lt_u32_e32 vcc, v1, v2
-; VI-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_inc v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB143_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB143_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
-; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_inc v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB143_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -11742,33 +10965,13 @@ define void @global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory(p
; SI-LABEL: global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s10, 0
-; SI-NEXT: s_mov_b32 s11, 0xf000
-; SI-NEXT: s_mov_b32 s8, s10
-; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dword v4, v[0:1], s[8:11], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[6:7], 0
-; SI-NEXT: .LBB152_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v4
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s4, s6
+; SI-NEXT: s_mov_b32 s5, s6
+; SI-NEXT: buffer_atomic_dec v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
-; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; SI-NEXT: s_cbranch_execnz .LBB152_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -11777,51 +10980,17 @@ define void @global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory(p
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v4, v[0:1]
-; VI-NEXT: s_mov_b64 s[6:7], 0
-; VI-NEXT: .LBB152_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
+; VI-NEXT: flat_atomic_dec v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; VI-NEXT: v_mov_b32_e32 v4, v3
-; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; VI-NEXT: s_cbranch_execnz .LBB152_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-NEXT: .LBB152_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
-; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_dec v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT: s_cbranch_execnz .LBB152_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -11832,88 +11001,33 @@ define i32 @global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr
; SI-LABEL: global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s10, 0
-; SI-NEXT: s_mov_b32 s11, 0xf000
-; SI-NEXT: s_mov_b32 s8, s10
-; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 offset:16
-; SI-NEXT: s_mov_b64 s[6:7], 0
-; SI-NEXT: .LBB153_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
-; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2
-; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
-; SI-NEXT: v_mov_b32_e32 v3, v4
-; SI-NEXT: v_mov_b32_e32 v4, v5
-; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[8:11], 0 addr64 offset:16 glc
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s4, s6
+; SI-NEXT: s_mov_b32 s5, s6
+; SI-NEXT: buffer_atomic_dec v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
-; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; SI-NEXT: s_cbranch_execnz .LBB153_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[6:7]
-; SI-NEXT: v_mov_b32_e32 v0, v3
+; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
-; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dword v0, v[3:4]
-; VI-NEXT: s_mov_b64 s[6:7], 0
-; VI-NEXT: .LBB153_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v1
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
-; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; VI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
-; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; VI-NEXT: s_cbranch_execnz .LBB153_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b64 s[6:7], 0
-; GFX9-NEXT: .LBB153_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v4, v3
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
-; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
-; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
-; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
+; GFX9-NEXT: global_atomic_dec v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
-; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; GFX9-NEXT: s_cbranch_execnz .LBB153_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
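The pattern exercised by every test above is the same: an integer atomicrmw
annotated with !amdgpu.no.remote.memory is selected to the native atomic
instruction (buffer_atomic_*/flat_atomic_*/global_atomic_*) instead of being
expanded into an atomicrmw.start/atomicrmw.end cmpxchg loop. A minimal
self-contained sketch of such a test input follows; the function name and the
trailing metadata definition are illustrative, not taken from this patch:

define i32 @sketch_atomic_max_no_remote(ptr addrspace(1) %p, i32 %v) {
  ; With the annotation, the backend may emit e.g. global_atomic_smax directly;
  ; without it, a system-scope max is expanded to a compare-and-swap loop.
  %r = atomicrmw max ptr addrspace(1) %p, i32 %v seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %r
}

!0 = !{}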
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
index 55a2dd0eb9a14..6cae0dfac7558 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
@@ -623,7 +623,7 @@ define amdgpu_kernel void @atomic_and_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -695,7 +695,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -771,7 +771,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -853,7 +853,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -912,7 +912,7 @@ define amdgpu_kernel void @atomic_and_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -983,7 +983,7 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -1056,7 +1056,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1135,7 +1135,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile and ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile and ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -1191,7 +1191,7 @@ define amdgpu_kernel void @atomic_sub_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1263,7 +1263,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -1339,7 +1339,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1421,7 +1421,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -1480,7 +1480,7 @@ define amdgpu_kernel void @atomic_sub_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1551,7 +1551,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -1624,7 +1624,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1703,7 +1703,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile sub ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile sub ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -1753,7 +1753,7 @@ define amdgpu_kernel void @atomic_max_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1822,7 +1822,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -1892,7 +1892,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -1971,7 +1971,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2024,7 +2024,7 @@ define amdgpu_kernel void @atomic_max_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2092,7 +2092,7 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2159,7 +2159,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2235,7 +2235,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile max ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile max ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2285,7 +2285,7 @@ define amdgpu_kernel void @atomic_umax_i64_offset(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2354,7 +2354,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2424,7 +2424,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr addrspace(1) %out,
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2503,7 +2503,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr addrspace(1) %o
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2556,7 +2556,7 @@ define amdgpu_kernel void @atomic_umax_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2624,7 +2624,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr addrspace(1) %out, ptr addrsp
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2691,7 +2691,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2767,7 +2767,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile umax ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umax ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2817,7 +2817,7 @@ define amdgpu_kernel void @atomic_min_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -2886,7 +2886,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -2956,7 +2956,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3035,7 +3035,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -3088,7 +3088,7 @@ define amdgpu_kernel void @atomic_min_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3156,7 +3156,7 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -3223,7 +3223,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3299,7 +3299,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile min ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile min ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -3349,7 +3349,7 @@ define amdgpu_kernel void @atomic_umin_i64_offset(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3418,7 +3418,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -3488,7 +3488,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr addrspace(1) %out,
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3567,7 +3567,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr addrspace(1) %o
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %gep, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -3620,7 +3620,7 @@ define amdgpu_kernel void @atomic_umin_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3688,7 +3688,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr addrspace(1) %out, ptr addrsp
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -3755,7 +3755,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3831,7 +3831,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile umin ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst
+ %tmp0 = atomicrmw volatile umin ptr addrspace(1) %ptr, i64 %in syncscope("workgroup") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -3887,7 +3887,7 @@ define amdgpu_kernel void @atomic_or_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -3959,7 +3959,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr addrspace(1) %out, ptr a
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -4035,7 +4035,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64_offset(ptr addrspace(1) %out, i6
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4117,7 +4117,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr addrspace(1) %out
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -4176,7 +4176,7 @@ define amdgpu_kernel void @atomic_or_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4247,7 +4247,7 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr addrspace(1) %out, ptr addrspac
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -4320,7 +4320,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4399,7 +4399,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr addrspace(1) %out, ptr a
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile or ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile or ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -4455,7 +4455,7 @@ define amdgpu_kernel void @atomic_xchg_i64_offset(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4510,7 +4510,7 @@ define amdgpu_kernel void @atomic_xchg_f64_offset(ptr addrspace(1) %out, double
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr double, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, double %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, double %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4565,7 +4565,7 @@ define amdgpu_kernel void @atomic_xchg_pointer_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr ptr, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, ptr %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, ptr %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4637,7 +4637,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -4713,7 +4713,7 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64_offset(ptr addrspace(1) %out,
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4795,7 +4795,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr addrspace(1) %o
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -4854,7 +4854,7 @@ define amdgpu_kernel void @atomic_xchg_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -4925,7 +4925,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr addrspace(1) %out, ptr addrsp
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -4998,7 +4998,7 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -5077,7 +5077,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xchg ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -5133,7 +5133,7 @@ define amdgpu_kernel void @atomic_xor_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -5205,7 +5205,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -5281,7 +5281,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -5363,7 +5363,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr addrspace(1) %ou
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -5422,7 +5422,7 @@ define amdgpu_kernel void @atomic_xor_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -5493,7 +5493,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_endpgm
entry:
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -5566,7 +5566,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -5645,7 +5645,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
- %tmp0 = atomicrmw volatile xor ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile xor ptr addrspace(1) %ptr, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -7146,7 +7146,7 @@ define amdgpu_kernel void @atomic_inc_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -7218,7 +7218,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -7294,7 +7294,7 @@ define amdgpu_kernel void @atomic_inc_i64_incr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -7349,7 +7349,7 @@ define amdgpu_kernel void @atomic_dec_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
@@ -7421,7 +7421,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
- %tmp0 = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
@@ -7497,6 +7497,8 @@ define amdgpu_kernel void @atomic_dec_i64_decr64_offset(ptr addrspace(1) %out, i
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
- %tmp0 = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst
+ %tmp0 = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i64 %in syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret void
}
+
+!0 = !{}
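The test updates above all follow one pattern: each atomicrmw gains a
!amdgpu.no.remote.memory annotation, asserting that the address does not
reside in remote memory (memory reached over a bus such as PCIe), which
lets the backend select the native atomic instruction instead of expanding
to a cmpxchg loop, as the codegen diffs below show. A minimal standalone
sketch of the annotated IR (the function name @example_no_remote is
hypothetical; the metadata convention matches the tests):

define void @example_no_remote(ptr addrspace(1) %ptr, i64 %val) {
  ; With the metadata attached, this can select a native buffer/flat/global
  ; atomic sub (e.g. global_atomic_sub_x2) rather than a CAS expansion loop.
  %old = atomicrmw sub ptr addrspace(1) %ptr, i64 %val seq_cst, !amdgpu.no.remote.memory !0
  ret void
}

!0 = !{}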
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index 1b9194ca4475d..f3b2ef8d69f9e 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -2091,29 +2091,9 @@ define void @global_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB38_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_sub_i32_e32 v4, vcc, v6, v2
-; SI-NEXT: v_subb_u32_e32 v5, vcc, v7, v3, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB38_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -2122,47 +2102,17 @@ define void @global_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB38_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_sub_u32_e32 v4, vcc, v6, v2
-; VI-NEXT: v_subb_u32_e32 v5, vcc, v7, v3, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB38_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB38_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB38_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -2173,88 +2123,34 @@ define i64 @global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-LABEL: global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v3
-; SI-NEXT: v_mov_b32_e32 v7, v2
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB39_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7
-; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB39_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB39_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_sub_u32_e32 v6, vcc, v8, v2
-; VI-NEXT: v_subb_u32_e32 v7, vcc, v9, v3, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB39_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB39_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB39_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -3023,29 +2919,9 @@ define void @global_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB48_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v5, v7, v3
-; SI-NEXT: v_and_b32_e32 v4, v6, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB48_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -3054,47 +2930,17 @@ define void @global_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB48_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_and_b32_e32 v5, v7, v3
-; VI-NEXT: v_and_b32_e32 v4, v6, v2
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB48_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB48_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_and_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_and_b32_e32 v4, v6, v2
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB48_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -3105,88 +2951,34 @@ define i64 @global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-LABEL: global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v3
-; SI-NEXT: v_mov_b32_e32 v7, v2
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB49_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_and_b32_e32 v9, v11, v6
-; SI-NEXT: v_and_b32_e32 v8, v10, v7
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB49_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB49_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_and_b32_e32 v7, v9, v3
-; VI-NEXT: v_and_b32_e32 v6, v8, v2
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB49_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB49_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_and_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_and_b32_e32 v4, v6, v2
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB49_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -4947,29 +4739,9 @@ define void @global_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr addr
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB68_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v5, v7, v3
-; SI-NEXT: v_or_b32_e32 v4, v6, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB68_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -4978,47 +4750,17 @@ define void @global_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr addr
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB68_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_e32 v5, v7, v3
-; VI-NEXT: v_or_b32_e32 v4, v6, v2
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB68_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB68_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_or_b32_e32 v4, v6, v2
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_or_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB68_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -5029,88 +4771,34 @@ define i64 @global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspa
; SI-LABEL: global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v3
-; SI-NEXT: v_mov_b32_e32 v7, v2
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB69_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_or_b32_e32 v9, v11, v6
-; SI-NEXT: v_or_b32_e32 v8, v10, v7
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB69_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB69_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_or_b32_e32 v7, v9, v3
-; VI-NEXT: v_or_b32_e32 v6, v8, v2
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB69_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB69_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_or_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_or_b32_e32 v4, v6, v2
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB69_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -5879,29 +5567,9 @@ define void @global_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB78_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_xor_b32_e32 v5, v7, v3
-; SI-NEXT: v_xor_b32_e32 v4, v6, v2
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB78_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -5910,47 +5578,17 @@ define void @global_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB78_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_xor_b32_e32 v5, v7, v3
-; VI-NEXT: v_xor_b32_e32 v4, v6, v2
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB78_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB78_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_xor_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_xor_b32_e32 v4, v6, v2
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_xor_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB78_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -5961,88 +5599,34 @@ define i64 @global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-LABEL: global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v6, v3
-; SI-NEXT: v_mov_b32_e32 v7, v2
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB79_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_xor_b32_e32 v9, v11, v6
-; SI-NEXT: v_xor_b32_e32 v8, v10, v7
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB79_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB79_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_xor_b32_e32 v7, v9, v3
-; VI-NEXT: v_xor_b32_e32 v6, v8, v2
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB79_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB79_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_xor_b32_e32 v5, v7, v3
-; GFX9-NEXT: v_xor_b32_e32 v4, v6, v2
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB79_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -7335,30 +6919,9 @@ define void @global_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB92_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_smax_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB92_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -7367,49 +6930,17 @@ define void @global_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB92_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB92_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB92_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB92_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -7420,91 +6951,34 @@ define i64 @global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-LABEL: global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: v_mov_b32_e32 v4, v2
-; SI-NEXT: v_mov_b32_e32 v7, v1
-; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB93_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
-; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_smax_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB93_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB93_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB93_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB93_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB93_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -8685,30 +8159,9 @@ define void @global_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr ad
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB105_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_umax_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB105_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -8717,49 +8170,17 @@ define void @global_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr ad
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB105_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB105_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB105_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB105_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -8770,91 +8191,34 @@ define i64 @global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-LABEL: global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: v_mov_b32_e32 v4, v2
-; SI-NEXT: v_mov_b32_e32 v7, v1
-; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB106_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
-; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_umax_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB106_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB106_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB106_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB106_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB106_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -9671,30 +9035,9 @@ define void @global_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr ad
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB115_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_umin_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB115_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -9703,49 +9046,17 @@ define void @global_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr ad
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB115_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB115_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB115_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB115_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -9756,91 +9067,34 @@ define i64 @global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-LABEL: global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: v_mov_b32_e32 v4, v2
-; SI-NEXT: v_mov_b32_e32 v7, v1
-; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB116_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
-; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_umin_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB116_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB116_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB116_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB116_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB116_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -11122,30 +10376,9 @@ define void @global_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB129_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: buffer_atomic_smin_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB129_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -11154,49 +10387,17 @@ define void @global_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr add
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB129_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB129_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB129_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB129_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -11207,91 +10408,34 @@ define i64 @global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory(ptr addrsp
; SI-LABEL: global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: v_mov_b32_e32 v4, v2
-; SI-NEXT: v_mov_b32_e32 v7, v1
-; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB130_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
-; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_smin_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB130_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB130_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
-; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB130_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB130_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB130_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -12132,32 +11276,9 @@ define void @global_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(p
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB139_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v4, vcc, 1, v6
-; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; SI-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; SI-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB139_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -12166,53 +11287,17 @@ define void @global_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(p
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB139_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 1, v6
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; VI-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB139_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB139_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 1, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB139_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -12223,97 +11308,34 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr
; SI-LABEL: global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: v_mov_b32_e32 v4, v2
-; SI-NEXT: v_mov_b32_e32 v7, v1
-; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[8:9], 0
-; SI-NEXT: .LBB140_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10
-; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v9, 0, v1, vcc
-; SI-NEXT: v_cndmask_b32_e32 v8, 0, v0, vcc
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; SI-NEXT: s_cbranch_execnz .LBB140_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: .LBB140_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v8
-; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v9, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[2:3]
-; VI-NEXT: v_cndmask_b32_e32 v7, 0, v1, vcc
-; VI-NEXT: v_cndmask_b32_e32 v6, 0, v0, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; VI-NEXT: s_cbranch_execnz .LBB140_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: .LBB140_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 1, v6
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_execnz .LBB140_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
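For reference, the uinc_wrap loops being deleted here are the generic expansion of the instruction's wrap semantics: the stored value is old + 1, wrapping to 0 once old reaches the operand. A scalar sketch of that computation (illustrative, not part of the patch):

define i64 @uinc_wrap_semantics(i64 %old, i64 %in) {
  ; result = (old >= in) ? 0 : old + 1
  %inc = add i64 %old, 1
  %lt  = icmp ult i64 %old, %in
  %new = select i1 %lt, i64 %inc, i64 0
  ret i64 %new
}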
@@ -13222,38 +12244,13 @@ define void @global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(p
; SI-LABEL: global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s10, 0
-; SI-NEXT: s_mov_b32 s11, 0xf000
-; SI-NEXT: s_mov_b32 s8, s10
-; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[8:11], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[6:7], 0
-; SI-NEXT: .LBB149_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v4, vcc, -1, v6
-; SI-NEXT: v_addc_u32_e32 v5, vcc, -1, v7, vcc
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; SI-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; SI-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v7
-; SI-NEXT: v_mov_b32_e32 v10, v6
-; SI-NEXT: v_mov_b32_e32 v9, v5
-; SI-NEXT: v_mov_b32_e32 v8, v4
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[8:11], 0 addr64 offset:32 glc
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s4, s6
+; SI-NEXT: s_mov_b32 s5, s6
+; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
-; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; SI-NEXT: v_mov_b32_e32 v6, v8
-; SI-NEXT: v_mov_b32_e32 v7, v9
-; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; SI-NEXT: s_cbranch_execnz .LBB149_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
@@ -13262,57 +12259,17 @@ define void @global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(p
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
-; VI-NEXT: s_mov_b64 s[8:9], 0
-; VI-NEXT: .LBB149_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; VI-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; VI-NEXT: v_add_u32_e64 v4, s[6:7], -1, v6
-; VI-NEXT: v_addc_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; VI-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; VI-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
+; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; VI-NEXT: v_mov_b32_e32 v7, v5
-; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT: v_mov_b32_e32 v6, v4
-; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT: s_cbranch_execnz .LBB149_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[8:9]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[8:9], 0
-; GFX9-NEXT: .LBB149_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GFX9-NEXT: v_add_co_u32_e64 v4, s[6:7], -1, v6
-; GFX9-NEXT: v_addc_co_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX9-NEXT: s_cbranch_execnz .LBB149_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
@@ -13323,103 +12280,34 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr
; SI-LABEL: global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v3
-; SI-NEXT: v_mov_b32_e32 v4, v2
-; SI-NEXT: v_mov_b32_e32 v7, v1
-; SI-NEXT: v_mov_b32_e32 v6, v0
-; SI-NEXT: s_mov_b32 s10, 0
-; SI-NEXT: s_mov_b32 s11, 0xf000
-; SI-NEXT: s_mov_b32 s8, s10
-; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 offset:32
-; SI-NEXT: s_mov_b64 s[6:7], 0
-; SI-NEXT: .LBB150_1: ; %atomicrmw.start
-; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10
-; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; SI-NEXT: v_cmp_gt_u64_e64 s[4:5], v[10:11], v[4:5]
-; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; SI-NEXT: v_cndmask_b32_e32 v9, v1, v5, vcc
-; SI-NEXT: v_cndmask_b32_e32 v8, v0, v4, vcc
-; SI-NEXT: v_mov_b32_e32 v0, v8
-; SI-NEXT: v_mov_b32_e32 v1, v9
-; SI-NEXT: v_mov_b32_e32 v2, v10
-; SI-NEXT: v_mov_b32_e32 v3, v11
-; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[8:11], 0 addr64 offset:32 glc
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s4, s6
+; SI-NEXT: s_mov_b32 s5, s6
+; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
-; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
-; SI-NEXT: s_cbranch_execnz .LBB150_1
-; SI-NEXT: ; %bb.2: ; %atomicrmw.end
-; SI-NEXT: s_or_b64 exec, exec, s[6:7]
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
-; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
-; VI-NEXT: s_mov_b64 s[8:9], 0
-; VI-NEXT: .LBB150_1: ; %atomicrmw.start
-; VI-NEXT: ; =>This Inner Loop Header: Depth=1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v9, v1
-; VI-NEXT: v_mov_b32_e32 v8, v0
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; VI-NEXT: v_cmp_gt_u64_e64 s[4:5], v[8:9], v[2:3]
-; VI-NEXT: v_add_u32_e64 v0, s[6:7], -1, v8
-; VI-NEXT: v_addc_u32_e64 v1, s[6:7], -1, v9, s[6:7]
-; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; VI-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
-; VI-NEXT: v_cndmask_b32_e32 v6, v0, v2, vcc
-; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
+; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
-; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
-; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; VI-NEXT: s_cbranch_execnz .LBB150_1
-; VI-NEXT: ; %bb.2: ; %atomicrmw.end
-; VI-NEXT: s_or_b64 exec, exec, s[8:9]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
-; GFX9-NEXT: s_mov_b64 s[8:9], 0
-; GFX9-NEXT: .LBB150_1: ; %atomicrmw.start
-; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v6, v4
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GFX9-NEXT: v_cmp_gt_u64_e64 s[4:5], v[6:7], v[2:3]
-; GFX9-NEXT: v_add_co_u32_e64 v4, s[6:7], -1, v6
-; GFX9-NEXT: v_addc_co_u32_e64 v5, s[6:7], -1, v7, s[6:7]
-; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
-; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
+; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
-; GFX9-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[8:9]
-; GFX9-NEXT: s_cbranch_execnz .LBB150_1
-; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX9-NEXT: s_or_b64 exec, exec, s[8:9]
-; GFX9-NEXT: v_mov_b32_e32 v0, v4
-; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
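Likewise, the deleted udec_wrap loops spell out the LangRef semantics in the open: the stored value is the operand when the old value is zero or greater than the operand, and old - 1 otherwise. As a scalar sketch (illustrative, not part of the patch):

define i64 @udec_wrap_semantics(i64 %old, i64 %in) {
  ; result = (old == 0 || old > in) ? in : old - 1
  %dec    = sub i64 %old, 1
  %iszero = icmp eq i64 %old, 0
  %isbig  = icmp ugt i64 %old, %in
  %wrap   = or i1 %iszero, %isbig
  %new    = select i1 %wrap, i64 %in, i64 %dec
  ret i64 %new
}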
diff --git a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
index 28aa76ab12f37..057c09e9a255c 100644
--- a/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/idemponent-atomics.ll
@@ -48,7 +48,7 @@ define i32 @global_agent_release_idempotent_or(ptr addrspace(1) %in) {
; GFX942-NEXT: s_setpc_b64 s[30:31]
; OPT-LABEL: @global_agent_release_idempotent_or(
; OPT-NEXT: entry:
-; OPT-NEXT: [[VAL:%.*]] = atomicrmw or ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") release, align 4
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw add ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") release, align 4
; OPT-NEXT: ret i32 [[VAL]]
;
entry:
@@ -56,6 +56,42 @@ entry:
ret i32 %val
}
+define i32 @global_agent_release_idempotent_or_no_remote(ptr addrspace(1) %in) {
+; GFX942-LABEL: global_agent_release_idempotent_or_no_remote:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v2, 0
+; GFX942-NEXT: buffer_wbl2 sc1
+; GFX942-NEXT: global_atomic_or v0, v[0:1], v2, off sc0
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+; OPT-LABEL: @global_agent_release_idempotent_or_no_remote(
+; OPT-NEXT: entry:
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw or ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") release, align 4, !amdgpu.no.remote.memory [[META0:![0-9]+]]
+; OPT-NEXT: ret i32 [[VAL]]
+entry:
+ %val = atomicrmw or ptr addrspace(1) %in, i32 0 syncscope("agent-one-as") release, align 4, !amdgpu.no.remote.memory !0
+ ret i32 %val
+}
+
+define i32 @global_agent_release_idempotent_or_no_fine_grained(ptr addrspace(1) %in) {
+; GFX942-LABEL: global_agent_release_idempotent_or_no_fine_grained:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v2, 0
+; GFX942-NEXT: buffer_wbl2 sc1
+; GFX942-NEXT: global_atomic_or v0, v[0:1], v2, off sc0
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: s_setpc_b64 s[30:31]
+; OPT-LABEL: @global_agent_release_idempotent_or_no_fine_grained(
+; OPT-NEXT: entry:
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw or ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") release, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; OPT-NEXT: ret i32 [[VAL]]
+entry:
+ %val = atomicrmw or ptr addrspace(1) %in, i32 0 syncscope("agent-one-as") release, align 4, !amdgpu.no.fine.grained.memory !0
+ ret i32 %val
+}
+
define i32 @global_agent_acquire_release_idempotent_or(ptr addrspace(1) %in) {
; GFX942-LABEL: global_agent_acquire_release_idempotent_or:
; GFX942: ; %bb.0: ; %entry
@@ -68,7 +104,7 @@ define i32 @global_agent_acquire_release_idempotent_or(ptr addrspace(1) %in) {
; GFX942-NEXT: s_setpc_b64 s[30:31]
; OPT-LABEL: @global_agent_acquire_release_idempotent_or(
; OPT-NEXT: entry:
-; OPT-NEXT: [[VAL:%.*]] = atomicrmw or ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") acq_rel, align 4
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw add ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") acq_rel, align 4
; OPT-NEXT: ret i32 [[VAL]]
;
entry:
@@ -88,9 +124,8 @@ define i32 @global_agent_acquire_release_idempotent_or__no_fine_grained(ptr addr
; GFX942-NEXT: s_setpc_b64 s[30:31]
; OPT-LABEL: @global_agent_acquire_release_idempotent_or__no_fine_grained(
; OPT-NEXT: entry:
-; OPT-NEXT: [[VAL:%.*]] = atomicrmw or ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") acq_rel, align 4, !amdgpu.no.fine.grained.memory [[META0:![0-9]+]]
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw or ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") acq_rel, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; OPT-NEXT: ret i32 [[VAL]]
-;
entry:
%val = atomicrmw or ptr addrspace(1) %in, i32 0 syncscope("agent-one-as") acq_rel, align 4, !amdgpu.no.fine.grained.memory !0
ret i32 %val
@@ -108,7 +143,7 @@ define i32 @global_agent_seq_cst_idempotent_or(ptr addrspace(1) %in) {
; GFX942-NEXT: s_setpc_b64 s[30:31]
; OPT-LABEL: @global_agent_seq_cst_idempotent_or(
; OPT-NEXT: entry:
-; OPT-NEXT: [[VAL:%.*]] = atomicrmw or ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") seq_cst, align 4
+; OPT-NEXT: [[VAL:%.*]] = atomicrmw add ptr addrspace(1) [[IN:%.*]], i32 0 syncscope("agent-one-as") seq_cst, align 4
; OPT-NEXT: ret i32 [[VAL]]
;
entry:
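The idemponent-atomics.ll checks above pin down how the idempotent case interacts with the new metadata: a plain atomicrmw or with a zero operand is still canonicalized to atomicrmw add, while the same operation carrying !amdgpu.no.remote.memory or !amdgpu.no.fine.grained.memory is left as an or. A minimal reproducer for the annotated case (function name illustrative, not from the test):

define i32 @idempotent_or_no_remote_sketch(ptr addrspace(1) %in) {
  ; Stays an 'or' because the metadata promises no remote memory is involved.
  %val = atomicrmw or ptr addrspace(1) %in, i32 0 syncscope("agent-one-as") release, align 4, !amdgpu.no.remote.memory !0
  ret i32 %val
}

!0 = !{}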
diff --git a/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll
index 7eb44636f79d7..010642b75f5f7 100644
--- a/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll
@@ -47,7 +47,7 @@ define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, ptr addrspace(1
atomic:
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
- %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst
+ %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
store i32 %ret, ptr addrspace(1) %out
br label %exit
@@ -87,7 +87,7 @@ define amdgpu_kernel void @atomic_max_i32_noret(ptr addrspace(1) %out, ptr addrs
atomic:
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
- %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst
+ %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0
br label %exit
exit:
@@ -96,3 +96,5 @@ exit:
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
+
+!0 = !{}
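Note the trailing !0 = !{}: both annotations are presence-only, so an empty metadata node is all that is required. A frontend asserting both properties on one operation would emit something like this (a hypothetical sketch, not from the patch):

define i32 @max_both_annotations_sketch(ptr addrspace(1) %p, i32 %y) {
  %ret = atomicrmw max ptr addrspace(1) %p, i32 %y syncscope("workgroup") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
  ret i32 %ret
}

!0 = !{}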
diff --git a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll
index 8ea83da78f889..159aede8d96ba 100644
--- a/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl_add_ptr_global.ll
@@ -19,7 +19,7 @@ define void @shl_base_atomicrmw_global_ptr(ptr addrspace(1) %out, ptr addrspace(
%cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
%shl = shl i64 %cast, 2
%castback = inttoptr i64 %shl to ptr addrspace(1)
- %val = atomicrmw and ptr addrspace(1) %castback, i32 3 syncscope("agent") seq_cst
+ %val = atomicrmw and ptr addrspace(1) %castback, i32 3 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
store volatile i64 %cast, ptr addrspace(1) %extra.use, align 4
ret void
}
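In the expand-atomic-i16.ll updates that follow, the former shared CHECK prefix is split into GCN and R600 because the two targets now diverge: on GCN the masked 16-bit and is expanded through a 32-bit cmpxchg retry loop, while R600 keeps the direct 32-bit atomicrmw. The loop skeleton the GCN checks describe, reduced to the align-4 case (a simplified sketch, not lifted from the test):

define i16 @cas_loop_and_sketch(ptr addrspace(1) %p, i16 %value) {
  %op    = zext i16 %value to i32
  %mask  = or i32 %op, -65536          ; keep the upper half unchanged
  %start = load i32, ptr addrspace(1) %p, align 4
  br label %loop
loop:
  %old  = phi i32 [ %start, %0 ], [ %loaded, %loop ]
  %new  = and i32 %old, %mask
  %pair = cmpxchg ptr addrspace(1) %p, i32 %old, i32 %new syncscope("agent") seq_cst seq_cst, align 4
  %ok     = extractvalue { i32, i1 } %pair, 1
  %loaded = extractvalue { i32, i1 } %pair, 0
  br i1 %ok, label %done, label %loop
done:
  %trunc = trunc i32 %loaded to i16
  ret i16 %trunc
}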
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
index c49909597c72c..72fc4f468543a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i16.ll
@@ -140,33 +140,74 @@ define i16 @test_atomicrmw_sub_i16_global_agent(ptr addrspace(1) %ptr, i16 %valu
}
define i16 @test_atomicrmw_and_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent(
-; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
-; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; R600-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; R600-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; R600-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; R600-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4
ret i16 %res
@@ -174,21 +215,46 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4(ptr addrspace(1) %ptr, i1
; Drop unknown metadata and noundef
define i16 @test_atomicrmw_and_i16_global_agent_drop_md(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_drop_md(
-; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
-; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_drop_md(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_drop_md(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; R600-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; R600-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; R600-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; R600-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !noundef !0, !some.unknown.md !0
ret i16 %res
@@ -196,12 +262,28 @@ define i16 @test_atomicrmw_and_i16_global_agent_drop_md(ptr addrspace(1) %ptr, i
; Drop unknown metadata
define i16 @test_atomicrmw_and_i16_global_agent_align4_drop_md(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_drop_md(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_drop_md(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_drop_md(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noundef !0, !some.unknown.md !0
ret i16 %res
@@ -209,21 +291,46 @@ define i16 @test_atomicrmw_and_i16_global_agent_align4_drop_md(ptr addrspace(1)
; Drop noundef, preserve mmra
define i16 @test_atomicrmw_and_i16_global_agent_preserve_mmra(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_mmra(
-; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !mmra [[META0:![0-9]+]]
-; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_mmra(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4, !mmra [[META0:![0-9]+]]
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !mmra [[META0]]
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_preserve_mmra(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; R600-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; R600-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; R600-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; R600-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !mmra [[META0:![0-9]+]]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !noundef !0, !mmra !1
ret i16 %res
@@ -231,126 +338,252 @@ define i16 @test_atomicrmw_and_i16_global_agent_preserve_mmra(ptr addrspace(1) %
; Drop noundef, preserve mmra
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !mmra [[META0]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4, !mmra [[META0]]
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4, !mmra [[META0]]
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_mmra(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !mmra [[META0]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noundef !0, !mmra !1
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !alias.scope [[META1:![0-9]+]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_alias_scope(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !alias.scope [[META1:![0-9]+]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !alias.scope !2
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !noalias [[META1]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_noalias(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !noalias [[META1]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !noalias !2
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa_struct(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa.struct [[TBAA_STRUCT4:![0-9]+]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !tbaa.struct !5
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa [[TBAA5:![0-9]+]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(1) [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[NEWLOADED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4_preserve_tbaa(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !tbaa [[TBAA5:![0-9]+]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !tbaa !6
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(
-; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8:![0-9]+]]
-; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META1:![0-9]+]]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_remote_memory(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; R600-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; R600-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; R600-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; R600-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8:![0-9]+]]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META1]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_remote_memory(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory [[META8]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !amdgpu.no.remote.memory !0
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(
-; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
-; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META1]]
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent__amdgpu_no_fine_grained_memory(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; R600-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; R600-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; R600-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; R600-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
ret i16 %res
}
define i16 @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(
-; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
-; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(
+; GCN-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; GCN-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META1]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_and_i16_global_agent_align4__amdgpu_no_fine_grained_memory(
+; R600-NEXT: [[TMP1:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[ANDOPERAND:%.*]] = or i32 [[TMP1]], -65536
+; R600-NEXT: [[TMP2:%.*]] = atomicrmw and ptr addrspace(1) [[PTR:%.*]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META8]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[TMP2]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
ret i16 %res
@@ -390,40 +623,88 @@ define i16 @test_atomicrmw_nand_i16_global_agent(ptr addrspace(1) %ptr, i16 %val
}
define i16 @test_atomicrmw_or_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_or_i16_global_agent(
-; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw or ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] syncscope("agent") seq_cst, align 4
-; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_or_i16_global_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_or_i16_global_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; R600-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; R600-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; R600-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; R600-NEXT: [[TMP4:%.*]] = atomicrmw or ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] syncscope("agent") seq_cst, align 4
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
}
define i16 @test_atomicrmw_xor_i16_global_agent(ptr addrspace(1) %ptr, i16 %value) {
-; CHECK-LABEL: @test_atomicrmw_xor_i16_global_agent(
-; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
-; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
-; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw xor ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] syncscope("agent") seq_cst, align 4
-; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; CHECK-NEXT: ret i16 [[EXTRACTED]]
+; GCN-LABEL: @test_atomicrmw_xor_i16_global_agent(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; GCN-NEXT: ret i16 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_xor_i16_global_agent(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; R600-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; R600-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; R600-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; R600-NEXT: [[TMP4:%.*]] = atomicrmw xor ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] syncscope("agent") seq_cst, align 4
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; R600-NEXT: ret i16 [[EXTRACTED]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst
ret i16 %res
@@ -1362,6 +1643,102 @@ define i16 @test_atomicrmw_add_i16_buffer_fat_agent_align4(ptr addrspace(7) %ptr
ret i16 %res
}
+define i16 @test_atomicrmw_sub_i16_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i16_global_agent__amdgpu_no_remote_memory(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_i16_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i16_global_agent__amdgpu_no_fine_grained_memory(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i16 %res
+}
+
+define i16 @test_atomicrmw_sub_i16_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i16 %value) {
+; CHECK-LABEL: @test_atomicrmw_sub_i16_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; CHECK-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; CHECK-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; CHECK-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; CHECK-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = zext i16 [[VALUE:%.*]] to i32
+; CHECK-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; CHECK-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; CHECK-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; CHECK-NEXT: ret i16 [[EXTRACTED]]
+;
+ %res = atomicrmw sub ptr addrspace(1) %ptr, i16 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i16 %res
+}
+
!0 = !{}
!1 = !{!"foo", !"bar"}
!2 = !{!3}
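
The pattern all of these regenerated checks encode: without either hint, integer sub/and/or/xor atomics on global pointers are now expanded into the load + cmpxchg retry loop seen above on most subtargets (the test comments note these operations are not supported over PCIe), while carrying the metadata preserves the single hardware atomic where the subword packing allows it. A minimal IR sketch of how a frontend would opt in; the function name and metadata node numbering here are illustrative, not taken from the patch:

  define i32 @sample_or(ptr addrspace(1) %ptr, i32 %value) {
    ; Assumption for illustration: with both hints attached, AtomicExpand is
    ; expected to leave this as a single atomicrmw rather than a cmpxchg loop.
    %res = atomicrmw or ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
    ret i32 %res
  }
  !0 = !{}
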
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll
index a1007bacd522f..1440045d11e2d 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-agent.ll
@@ -110,10 +110,99 @@ define i32 @test_atomicrmw_add_i32_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary, sub is not supported over PCIe
define i32 @test_atomicrmw_sub_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_sub_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
@@ -155,10 +244,99 @@ define i32 @test_atomicrmw_sub_i32_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_and_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_and_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
@@ -285,10 +463,99 @@ define i32 @test_atomicrmw_nand_i32_global_agent__amdgpu_no_fine_grained_memory_
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_or_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_or_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
@@ -330,10 +597,99 @@ define i32 @test_atomicrmw_or_i32_global_agent__amdgpu_no_fine_grained_memory__a
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_xor_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_xor_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
@@ -375,10 +731,105 @@ define i32 @test_atomicrmw_xor_i32_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_max_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_max_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
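
(The max expansion above, and the min/umax/umin expansions that follow, differ only in this two-instruction update step; a sketch with hypothetical names, where the predicate becomes sle for min, ugt for umax, and ule for umin:

  %cmp = icmp sgt i32 %old, %value
  %new = select i1 %cmp, i32 %old, i32 %value
)
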
@@ -420,10 +871,105 @@ define i32 @test_atomicrmw_max_i32_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_min_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_min_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
@@ -465,10 +1011,105 @@ define i32 @test_atomicrmw_min_i32_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_umax_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_umax_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
@@ -510,10 +1151,105 @@ define i32 @test_atomicrmw_umax_i32_global_agent__amdgpu_no_fine_grained_memory_
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_umin_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_umin_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
@@ -555,10 +1291,111 @@ define i32 @test_atomicrmw_umin_i32_global_agent__amdgpu_no_fine_grained_memory_
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
+; GFX803-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
+; GFX906-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
+; GFX908-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
+; GFX90A-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
+; GFX10-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
+; GFX11-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
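
(The add/icmp/select chain in the expanded bodies above is the uinc_wrap update rule, i.e. new = (old >= value) ? 0 : old + 1. As a sketch with hypothetical names:

  %inc = add i32 %old, 1
  %wrap = icmp uge i32 %old, %value      ; wrap to 0 once the counter reaches %value
  %new = select i1 %wrap, i32 0, i32 %inc
)
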
@@ -600,10 +1437,123 @@ define i32 @test_atomicrmw_uinc_wrap_i32_global_agent__amdgpu_no_fine_grained_me
; expansion is necessary, operation not supported over PCIe
define i32 @test_atomicrmw_udec_wrap_i32_global_agent(ptr addrspace(1) %ptr, i32 %value) {
-; COMMON-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
-; COMMON-NEXT: ret i32 [[NEWLOADED]]
+; GFX803-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
+; GFX803-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
+; GFX803-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
+; GFX803-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
+; GFX906-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
+; GFX906-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
+; GFX906-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
+; GFX908-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
+; GFX908-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
+; GFX908-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
+; GFX90A-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
+; GFX90A-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
+; GFX90A-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX940-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
+; GFX10-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
+; GFX10-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
+; GFX10-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
+; GFX11-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
+; GFX11-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
+; GFX11-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i32 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %value syncscope("agent") seq_cst
ret i32 %res
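
(udec_wrap is the mirror image: new = (old == 0 || old > value) ? value : old - 1, which the expansions above spell out as follows; sketch, hypothetical names:

  %dec = sub i32 %old, 1
  %zero = icmp eq i32 %old, 0
  %over = icmp ugt i32 %old, %value
  %wrap = or i1 %zero, %over             ; reload %value when the counter hits 0 or exceeds %value
  %new = select i1 %wrap, i32 %value, i32 %dec
)
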
@@ -657,12 +1607,3 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_agent__amdgpu_no_fine_grained_me
;.
; GFX12: [[META0]] = !{}
;.
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10: {{.*}}
-; GFX11: {{.*}}
-; GFX12: {{.*}}
-; GFX803: {{.*}}
-; GFX906: {{.*}}
-; GFX908: {{.*}}
-; GFX90A: {{.*}}
-; GFX942: {{.*}}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll
index 9a03d2164e1d3..b8e7c3eb4673a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i32-system.ll
@@ -131,16 +131,7 @@ define i32 @test_atomicrmw_sub_i32_global_system(ptr addrspace(1) %ptr, i32 %val
define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
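
(The [[META0]] reference in these checks binds to the empty metadata node that the !amdgpu.* annotations point at. A minimal standalone input reproducing this hunk's behavior might look like the following; the RUN invocation is an assumption for illustration, not copied from the test file:

  ; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=atomic-expand %s
  define i32 @sub_no_fine_grained(ptr addrspace(1) %ptr, i32 %value) {
    %res = atomicrmw sub ptr addrspace(1) %ptr, i32 %value seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
    ret i32 %res
  }
  !0 = !{}
)
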
@@ -150,16 +141,7 @@ define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_fine_grained_memory(
define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -169,16 +151,7 @@ define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_remote_memory(ptr ad
define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_sub_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -212,16 +185,7 @@ define i32 @test_atomicrmw_and_i32_global_system(ptr addrspace(1) %ptr, i32 %val
define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -231,16 +195,7 @@ define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_fine_grained_memory(
define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -250,16 +205,7 @@ define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_remote_memory(ptr ad
define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_and_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -378,16 +324,7 @@ define i32 @test_atomicrmw_or_i32_global_system(ptr addrspace(1) %ptr, i32 %valu
define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -397,16 +334,7 @@ define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_fine_grained_memory(p
define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -416,16 +344,7 @@ define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_remote_memory(ptr add
define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_or_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -459,16 +378,7 @@ define i32 @test_atomicrmw_xor_i32_global_system(ptr addrspace(1) %ptr, i32 %val
define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -478,16 +388,7 @@ define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_fine_grained_memory(
define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -497,16 +398,7 @@ define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_remote_memory(ptr ad
define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_xor_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -541,17 +433,7 @@ define i32 @test_atomicrmw_max_i32_global_system(ptr addrspace(1) %ptr, i32 %val
define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -561,17 +443,7 @@ define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_fine_grained_memory(
define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -581,17 +453,7 @@ define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_remote_memory(ptr ad
define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_max_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -626,17 +488,7 @@ define i32 @test_atomicrmw_min_i32_global_system(ptr addrspace(1) %ptr, i32 %val
define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -646,17 +498,7 @@ define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_fine_grained_memory(
define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -666,17 +508,7 @@ define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_remote_memory(ptr ad
define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_min_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sle i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -711,17 +543,7 @@ define i32 @test_atomicrmw_umax_i32_global_system(ptr addrspace(1) %ptr, i32 %va
define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -731,17 +553,7 @@ define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_fine_grained_memory
define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -751,17 +563,7 @@ define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_remote_memory(ptr a
define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_umax_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -796,17 +598,7 @@ define i32 @test_atomicrmw_umin_i32_global_system(ptr addrspace(1) %ptr, i32 %va
define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -816,17 +608,7 @@ define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_fine_grained_memory
define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -836,17 +618,7 @@ define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_remote_memory(ptr a
define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_umin_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ule i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i32 [[LOADED]], i32 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -882,18 +654,7 @@ define i32 @test_atomicrmw_uinc_wrap_i32_global_system(ptr addrspace(1) %ptr, i3
define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
-; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -903,18 +664,7 @@ define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_fine_grained_m
define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
-; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -924,18 +674,7 @@ define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_remote_memory(
define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_uinc_wrap_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = add i32 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp uge i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
-; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP4]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -973,20 +712,7 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_system(ptr addrspace(1) %ptr, i3
define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
-; COMMON-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
-; COMMON-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -996,20 +722,7 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_fine_grained_m
define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
-; COMMON-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
-; COMMON-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -1019,20 +732,7 @@ define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_remote_memory(
define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i32 %value) {
; COMMON-LABEL: define i32 @test_atomicrmw_udec_wrap_i32_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i32 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(1) [[PTR]], align 4
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = sub i32 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i32 [[LOADED]], 0
-; COMMON-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i32 [[VALUE]], i32 [[TMP2]]
-; COMMON-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i32 [[LOADED]], i32 [[NEW]] seq_cst seq_cst, align 4
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP6]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP6]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i32 [[VALUE]] seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i32 [[NEWLOADED]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
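The i32 system-scope hunks above all follow one pattern: once the access carries both !amdgpu.no.fine.grained.memory and !amdgpu.no.remote.memory, AtomicExpand keeps the operation as a single hardware atomic instead of emitting the load/cmpxchg retry loop. A minimal standalone reproducer of that behavior (a sketch only; the function name is illustrative and not part of the test file):

define i32 @sketch_annotated_sub(ptr addrspace(1) %ptr, i32 %value) {
  ; Both annotations present: the atomicrmw survives expansion as-is.
  %res = atomicrmw sub ptr addrspace(1) %ptr, i32 %value seq_cst, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
  ret i32 %res
}

!0 = !{}

Dropping either annotation reverts to the cmpxchg loop shown in the removed lines above.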
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll
index 7586a0af43c95..8bc481408fe73 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-agent.ll
@@ -110,10 +110,99 @@ define i64 @test_atomicrmw_add_i64_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary: atomic sub is not supported over PCIe
define i64 @test_atomicrmw_sub_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_sub_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
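; For reference, per-subtarget check blocks like the ones above are normally
; regenerated with llvm/utils/update_test_checks.py over an opt invocation
; along these lines (an assumed RUN line mirroring the usual AtomicExpand
; test setup; the exact -mcpu list comes from the file's own RUN lines,
; which are not visible in this hunk):
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomic-expand %s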
@@ -155,10 +244,99 @@ define i64 @test_atomicrmw_sub_i64_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_and_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_and_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -285,10 +463,99 @@ define i64 @test_atomicrmw_nand_i64_global_agent__amdgpu_no_fine_grained_memory_
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_or_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_or_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -330,10 +597,99 @@ define i64 @test_atomicrmw_or_i64_global_agent__amdgpu_no_fine_grained_memory__a
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_xor_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_xor_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -375,10 +731,105 @@ define i64 @test_atomicrmw_xor_i64_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_max_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_max_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -420,10 +871,105 @@ define i64 @test_atomicrmw_max_i64_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_min_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_min_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -465,10 +1011,105 @@ define i64 @test_atomicrmw_min_i64_global_agent__amdgpu_no_fine_grained_memory__
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_umax_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_umax_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -510,10 +1151,105 @@ define i64 @test_atomicrmw_umax_i64_global_agent__amdgpu_no_fine_grained_memory_
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_umin_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX803-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX906-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX908-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX90A-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX10-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
+; GFX11-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_umin_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -555,10 +1291,111 @@ define i64 @test_atomicrmw_umin_i64_global_agent__amdgpu_no_fine_grained_memory_
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
+; GFX803-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
+; GFX803-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
+; GFX906-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
+; GFX906-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
+; GFX908-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
+; GFX908-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
+; GFX90A-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
+; GFX90A-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
+; GFX10-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
+; GFX10-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
+; GFX11-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
+; GFX11-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -600,10 +1437,123 @@ define i64 @test_atomicrmw_uinc_wrap_i64_global_agent__amdgpu_no_fine_grained_me
; expansion is necessary: the operation is not supported over PCIe
define i64 @test_atomicrmw_udec_wrap_i64_global_agent(ptr addrspace(1) %ptr, i64 %value) {
-; COMMON-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
-; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
-; COMMON-NEXT: ret i64 [[NEWLOADED]]
+; GFX803-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX803-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX803: atomicrmw.start:
+; GFX803-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX803-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
+; GFX803-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
+; GFX803-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX803-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX803-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
+; GFX803-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX803-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
+; GFX803-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
+; GFX803-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX803: atomicrmw.end:
+; GFX803-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX906-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX906-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX906-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX906-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX906: atomicrmw.start:
+; GFX906-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX906-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
+; GFX906-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
+; GFX906-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX906-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX906-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
+; GFX906-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX906-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
+; GFX906-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
+; GFX906-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX906: atomicrmw.end:
+; GFX906-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX908-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX908: atomicrmw.start:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
+; GFX908-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
+; GFX908-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX908-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
+; GFX908-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX908: atomicrmw.end:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX90A-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX90A: atomicrmw.start:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
+; GFX90A-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
+; GFX90A-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX90A-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
+; GFX90A-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX90A: atomicrmw.end:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX940-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX10-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX10-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX10: atomicrmw.start:
+; GFX10-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX10-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
+; GFX10-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
+; GFX10-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX10-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX10-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
+; GFX10-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX10-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
+; GFX10-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
+; GFX10-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX10: atomicrmw.end:
+; GFX10-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX11-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
+; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX11: atomicrmw.start:
+; GFX11-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GFX11-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
+; GFX11-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
+; GFX11-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
+; GFX11-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; GFX11-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
+; GFX11-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8
+; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
+; GFX11-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
+; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX11: atomicrmw.end:
+; GFX11-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX12-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %value syncscope("agent") seq_cst
ret i64 %res
@@ -657,12 +1607,3 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_agent__amdgpu_no_fine_grained_me
;.
; GFX12: [[META0]] = !{}
;.
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10: {{.*}}
-; GFX11: {{.*}}
-; GFX12: {{.*}}
-; GFX803: {{.*}}
-; GFX906: {{.*}}
-; GFX908: {{.*}}
-; GFX90A: {{.*}}
-; GFX942: {{.*}}
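
The regenerated checks above all follow the generic AtomicExpand pattern: load the old value, compute the updated one, then retry a cmpxchg until the exchange succeeds. The payoff of the new metadata shows up in the hunks below, where annotated operations are kept as a single atomicrmw. A minimal before/after sketch of the IR involved (the function names @sketch and @sketch_annotated are hypothetical; the metadata names and the expansion behavior are taken from the tests in this patch):

; Without annotations, subtargets such as gfx803 through gfx11 expand this
; xor into the cmpxchg retry loop shown in the checks above; gfx940 and
; gfx12 keep it as a single instruction.
define i64 @sketch(ptr addrspace(1) %p, i64 %v) {
  %r = atomicrmw xor ptr addrspace(1) %p, i64 %v syncscope("agent") seq_cst
  ret i64 %r
}

; With the new annotations attached, the operation is not expanded.
define i64 @sketch_annotated(ptr addrspace(1) %p, i64 %v) {
  %r = atomicrmw xor ptr addrspace(1) %p, i64 %v syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
  ret i64 %r
}

!0 = !{}

As the expand-atomic-i64-system.ll hunks below show, either annotation on its own is already enough to keep the single instruction, even at system scope.
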
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll
index 9e28f8f5ac3fb..c895eebcf0f8d 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i64-system.ll
@@ -131,16 +131,7 @@ define i64 @test_atomicrmw_sub_i64_global_system(ptr addrspace(1) %ptr, i64 %val
define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -150,16 +141,7 @@ define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_fine_grained_memory(
define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -169,16 +151,7 @@ define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_remote_memory(ptr ad
define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_sub_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -212,16 +185,7 @@ define i64 @test_atomicrmw_and_i64_global_system(ptr addrspace(1) %ptr, i64 %val
define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -231,16 +195,7 @@ define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_fine_grained_memory(
define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -250,16 +205,7 @@ define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_remote_memory(ptr ad
define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_and_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw and ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw and ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -378,16 +324,7 @@ define i64 @test_atomicrmw_or_i64_global_system(ptr addrspace(1) %ptr, i64 %valu
define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -397,16 +334,7 @@ define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_fine_grained_memory(p
define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -416,16 +344,7 @@ define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_remote_memory(ptr add
define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_or_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = or i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -459,16 +378,7 @@ define i64 @test_atomicrmw_xor_i64_global_system(ptr addrspace(1) %ptr, i64 %val
define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -478,16 +388,7 @@ define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_fine_grained_memory(
define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -497,16 +398,7 @@ define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_remote_memory(ptr ad
define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_xor_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[NEW:%.*]] = xor i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -541,17 +433,7 @@ define i64 @test_atomicrmw_max_i64_global_system(ptr addrspace(1) %ptr, i64 %val
define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -561,17 +443,7 @@ define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_fine_grained_memory(
define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -581,17 +453,7 @@ define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_remote_memory(ptr ad
define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_max_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sgt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw max ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw max ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -626,17 +488,7 @@ define i64 @test_atomicrmw_min_i64_global_system(ptr addrspace(1) %ptr, i64 %val
define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -646,17 +498,7 @@ define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_fine_grained_memory(
define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -666,17 +508,7 @@ define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_remote_memory(ptr ad
define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_min_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp sle i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw min ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw min ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -711,17 +543,7 @@ define i64 @test_atomicrmw_umax_i64_global_system(ptr addrspace(1) %ptr, i64 %va
define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -731,17 +553,7 @@ define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_fine_grained_memory
define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -751,17 +563,7 @@ define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_remote_memory(ptr a
define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_umax_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umax ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw umax ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -796,17 +598,7 @@ define i64 @test_atomicrmw_umin_i64_global_system(ptr addrspace(1) %ptr, i64 %va
define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -816,17 +608,7 @@ define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_fine_grained_memory
define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -836,17 +618,7 @@ define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_remote_memory(ptr a
define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_umin_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = icmp ule i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP2]], i64 [[LOADED]], i64 [[VALUE]]
-; COMMON-NEXT: [[TMP3:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP3]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP3]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw umin ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw umin ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -882,18 +654,7 @@ define i64 @test_atomicrmw_uinc_wrap_i64_global_system(ptr addrspace(1) %ptr, i6
define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
-; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -903,18 +664,7 @@ define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_fine_grained_m
define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
-; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -924,18 +674,7 @@ define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_remote_memory(
define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_uinc_wrap_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = add i64 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp uge i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i64 0, i64 [[TMP2]]
-; COMMON-NEXT: [[TMP4:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP4]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw uinc_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -973,20 +712,7 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_system(ptr addrspace(1) %ptr, i6
define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_fine_grained_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
-; COMMON-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
-; COMMON-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -996,20 +722,7 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_fine_grained_m
define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
-; COMMON-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
-; COMMON-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.remote.memory !0
@@ -1019,20 +732,7 @@ define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_remote_memory(
define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i64 %value) {
; COMMON-LABEL: define i64 @test_atomicrmw_udec_wrap_i64_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; COMMON-SAME: ptr addrspace(1) [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; COMMON-NEXT: [[TMP1:%.*]] = load i64, ptr addrspace(1) [[PTR]], align 8
-; COMMON-NEXT: br label [[ATOMICRMW_START:%.*]]
-; COMMON: atomicrmw.start:
-; COMMON-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
-; COMMON-NEXT: [[TMP2:%.*]] = sub i64 [[LOADED]], 1
-; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i64 [[LOADED]], 0
-; COMMON-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[LOADED]], [[VALUE]]
-; COMMON-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
-; COMMON-NEXT: [[NEW:%.*]] = select i1 [[TMP5]], i64 [[VALUE]], i64 [[TMP2]]
-; COMMON-NEXT: [[TMP6:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 [[LOADED]], i64 [[NEW]] seq_cst seq_cst, align 8
-; COMMON-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP6]], 1
-; COMMON-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP6]], 0
-; COMMON-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
-; COMMON: atomicrmw.end:
+; COMMON-NEXT: [[NEWLOADED:%.*]] = atomicrmw udec_wrap ptr addrspace(1) [[PTR]], i64 [[VALUE]] seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; COMMON-NEXT: ret i64 [[NEWLOADED]]
;
%res = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %value seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
index 590ee63001615..b19a717b56938 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-i8.ll
@@ -265,8 +265,17 @@ define i8 @test_atomicrmw_and_i8_global_agent(ptr addrspace(1) %ptr, i8 %value)
; GCN-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
; GCN-NEXT: [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; GCN-NEXT: [[TMP4:%.*]] = atomicrmw and ptr addrspace(1) [[ALIGNEDADDR]], i32 [[ANDOPERAND]] syncscope("agent") seq_cst, align 4
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = and i32 [[LOADED]], [[ANDOPERAND]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED]]
;
@@ -360,8 +369,17 @@ define i8 @test_atomicrmw_or_i8_global_agent(ptr addrspace(1) %ptr, i8 %value) {
; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
; GCN-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[TMP4:%.*]] = atomicrmw or ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] syncscope("agent") seq_cst, align 4
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = or i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED]]
;
@@ -394,8 +412,17 @@ define i8 @test_atomicrmw_xor_i8_global_agent(ptr addrspace(1) %ptr, i8 %value)
; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
; GCN-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; GCN-NEXT: [[TMP4:%.*]] = atomicrmw xor ptr addrspace(1) [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] syncscope("agent") seq_cst, align 4
-; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = xor i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[NEW]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
; GCN-NEXT: ret i8 [[EXTRACTED]]
;
@@ -1712,3 +1739,179 @@ define i8 @test_atomicrmw_add_i8_buffer_fat_agent_align4(ptr addrspace(7) %ptr,
%res = atomicrmw add ptr addrspace(7) %ptr, i8 %value syncscope("agent") seq_cst, align 4
ret i8 %res
}
+
+define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_sub_i8_global_agent__amdgpu_no_remote_memory(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; GCN-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; GCN-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_sub_i8_global_agent__amdgpu_no_remote_memory(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[TMP2]]
+; R600-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; R600-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; R600-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED]]
+;
+ %res = atomicrmw sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, !amdgpu.no.remote.memory !0
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; GCN-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; GCN-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[TMP2]]
+; R600-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; R600-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; R600-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED]]
+;
+ %res = atomicrmw sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ ret i8 %res
+}
+
+define i8 @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr, i8 %value) {
+; GCN-LABEL: @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; GCN-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[PTR:%.*]], i64 -4)
+; GCN-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; GCN-NEXT: [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; GCN-NEXT: [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; GCN-NEXT: [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; GCN-NEXT: [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; GCN-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; GCN-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; GCN-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; GCN-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; GCN-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; GCN-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; GCN-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; GCN-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; GCN-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[SHIFTAMT]]
+; GCN-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; GCN-NEXT: ret i8 [[EXTRACTED]]
+;
+; R600-LABEL: @test_atomicrmw_sub_i8_global_agent__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
+; R600-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR:%.*]], i32 -4)
+; R600-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i32
+; R600-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
+; R600-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
+; R600-NEXT: [[MASK:%.*]] = shl i32 255, [[TMP2]]
+; R600-NEXT: [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; R600-NEXT: [[TMP3:%.*]] = zext i8 [[VALUE:%.*]] to i32
+; R600-NEXT: [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[TMP2]]
+; R600-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(1) [[ALIGNEDADDR]], align 4
+; R600-NEXT: br label [[ATOMICRMW_START:%.*]]
+; R600: atomicrmw.start:
+; R600-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP4]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
+; R600-NEXT: [[NEW:%.*]] = sub i32 [[LOADED]], [[VALOPERAND_SHIFTED]]
+; R600-NEXT: [[TMP5:%.*]] = and i32 [[NEW]], [[MASK]]
+; R600-NEXT: [[TMP6:%.*]] = and i32 [[LOADED]], [[INV_MASK]]
+; R600-NEXT: [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
+; R600-NEXT: [[TMP8:%.*]] = cmpxchg ptr addrspace(1) [[ALIGNEDADDR]], i32 [[LOADED]], i32 [[TMP7]] syncscope("agent") seq_cst seq_cst, align 4
+; R600-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP8]], 1
+; R600-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP8]], 0
+; R600-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; R600: atomicrmw.end:
+; R600-NEXT: [[SHIFTED:%.*]] = lshr i32 [[NEWLOADED]], [[TMP2]]
+; R600-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; R600-NEXT: ret i8 [[EXTRACTED]]
+;
+ %res = atomicrmw sub ptr addrspace(1) %ptr, i8 %value syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
+ ret i8 %res
+}
+
+!0 = !{}
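
The GCN and R600 checks above exercise the subword path: there is no native i8 atomicrmw, so AtomicExpand widens the operation into a cmpxchg retry loop over the containing 4-byte-aligned word. As a worked example of the mask arithmetic the checks encode (not part of the patch), take a pointer whose low two bits are 3:

  ; PtrLSB   = ptr & 3      -> 3
  ; ShiftAmt = PtrLSB << 3  -> 24          (bit offset of the byte in its word)
  ; Mask     = 255 << 24    -> 0xFF000000  (selects the byte lane)
  ; Inv_Mask = Mask ^ -1    -> 0x00FFFFFF  (preserves the neighbors)

Each iteration performs the 32-bit sub, keeps only the selected lane (new & Mask), merges the untouched bytes back in (loaded & Inv_Mask), and retries the word-sized cmpxchg until it succeeds. The only difference between the two prefixes is the pointer index type: i64 on GCN, i32 on R600.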
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-flat-noalias-addrspace.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-flat-noalias-addrspace.ll
index 7692fd34312ff..eb0a28d3fc90a 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-flat-noalias-addrspace.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-flat-noalias-addrspace.ll
@@ -332,10 +332,71 @@ define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5(ptr %ptr, i64
}
define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(ptr %ptr, i64 %value) {
-; ALL-LABEL: define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(
-; ALL-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; ALL-NEXT: [[RES:%.*]] = atomicrmw and ptr [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8, !noalias.addrspace [[META0]]
-; ALL-NEXT: ret i64 [[RES]]
+; GFX7-LABEL: define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX7-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX7-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX7-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX7: [[ATOMICRMW_START]]:
+; GFX7-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX7-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX7-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX7-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX7-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX7-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX7: [[ATOMICRMW_END]]:
+; GFX7-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX900-LABEL: define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX900-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX900-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX900-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX900: [[ATOMICRMW_START]]:
+; GFX900-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX900-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX900-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX900-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX900-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX900-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX900: [[ATOMICRMW_END]]:
+; GFX900-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX908-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX908-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX908: [[ATOMICRMW_START]]:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX908: [[ATOMICRMW_END]]:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX90A-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX90A-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX90A: [[ATOMICRMW_START]]:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = and i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX90A: [[ATOMICRMW_END]]:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX940-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw and ptr [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX12-LABEL: define i64 @test_flat_atomicrmw_and_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX12-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw and ptr [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw and ptr %ptr, i64 %value syncscope("agent") seq_cst, !noalias.addrspace !1
ret i64 %res
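
Without !amdgpu.no.fine.grained.memory, a flat "and" might reach fine-grained host memory, so the lowering now depends on the subtarget: GFX7 through GFX90A conservatively expand to a load/cmpxchg retry loop, while GFX940 and GFX12 keep the single hardware instruction. Judging by the sibling test above (the variant without "__maybe_fine_grained" in its name), a frontend that can rule out fine-grained allocations keeps the native form on the older targets by attaching the metadata as well; a sketch, assuming !0 is defined as !{} as in the previous test file:

  %res = atomicrmw and ptr %ptr, i64 %value syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0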
@@ -424,10 +485,71 @@ define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5(ptr %ptr, i64
}
define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(ptr %ptr, i64 %value) {
-; ALL-LABEL: define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(
-; ALL-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
-; ALL-NEXT: [[RES:%.*]] = atomicrmw sub ptr [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8, !noalias.addrspace [[META0]]
-; ALL-NEXT: ret i64 [[RES]]
+; GFX7-LABEL: define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX7-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX7-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX7-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX7: [[ATOMICRMW_START]]:
+; GFX7-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX7-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX7-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX7-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX7-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX7-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX7: [[ATOMICRMW_END]]:
+; GFX7-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX900-LABEL: define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX900-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX900-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX900-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX900: [[ATOMICRMW_START]]:
+; GFX900-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX900-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX900-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX900-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX900-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX900-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX900: [[ATOMICRMW_END]]:
+; GFX900-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX908-LABEL: define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX908-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX908-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX908-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX908: [[ATOMICRMW_START]]:
+; GFX908-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX908-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX908-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX908-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX908-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX908: [[ATOMICRMW_END]]:
+; GFX908-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX90A-LABEL: define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX90A-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[TMP1:%.*]] = load i64, ptr [[PTR]], align 8
+; GFX90A-NEXT: br label %[[ATOMICRMW_START:.*]]
+; GFX90A: [[ATOMICRMW_START]]:
+; GFX90A-NEXT: [[LOADED:%.*]] = phi i64 [ [[TMP1]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; GFX90A-NEXT: [[NEW:%.*]] = sub i64 [[LOADED]], [[VALUE]]
+; GFX90A-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 [[LOADED]], i64 [[NEW]] syncscope("agent") seq_cst seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX90A-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; GFX90A-NEXT: [[NEWLOADED]] = extractvalue { i64, i1 } [[TMP2]], 0
+; GFX90A-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; GFX90A: [[ATOMICRMW_END]]:
+; GFX90A-NEXT: ret i64 [[NEWLOADED]]
+;
+; GFX940-LABEL: define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX940-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX940-NEXT: [[RES:%.*]] = atomicrmw sub ptr [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX940-NEXT: ret i64 [[RES]]
+;
+; GFX12-LABEL: define i64 @test_flat_atomicrmw_sub_i64_agent__noalias_addrspace_5__maybe_fine_grained(
+; GFX12-SAME: ptr [[PTR:%.*]], i64 [[VALUE:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw sub ptr [[PTR]], i64 [[VALUE]] syncscope("agent") seq_cst, align 8, !noalias.addrspace [[META0]]
+; GFX12-NEXT: ret i64 [[RES]]
;
%res = atomicrmw sub ptr %ptr, i64 %value syncscope("agent") seq_cst, !noalias.addrspace !1
ret i64 %res
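
The "sub" variant takes the identical per-subtarget split: a cmpxchg loop on GFX7 through GFX90A, the native atomic on GFX940 and GFX12. To reproduce either behavior when regenerating checks, a run line in the style these AtomicExpand tests already use should work (assumed invocation; swap -mcpu for the prefix of interest):

  opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -passes=atomic-expand expand-atomicrmw-flat-noalias-addrspace.ll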
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-integer-ops-0-to-add-0.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-integer-ops-0-to-add-0.ll
index 9c6a76a194d34..185f42ebc028c 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-integer-ops-0-to-add-0.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomicrmw-integer-ops-0-to-add-0.ll
@@ -69,10 +69,40 @@ define i32 @test_atomicrmw_or_0_as999_system(ptr addrspace(999) %ptr) {
; Leave as-is, only system scope should be changed.
define i32 @test_atomicrmw_or_0_global_agent(ptr addrspace(1) %ptr) {
-; CHECK-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
-; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
-; CHECK-NEXT: ret i32 [[RES]]
+; GFX803-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
+; GFX803-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; GFX803-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
+; GFX803-NEXT: ret i32 [[RES]]
+;
+; GFX900-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
+; GFX900-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; GFX900-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
+; GFX900-NEXT: ret i32 [[RES]]
+;
+; GFX90A-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
+; GFX90A-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; GFX90A-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
+; GFX90A-NEXT: ret i32 [[RES]]
+;
+; GFX10-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
+; GFX10-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
+; GFX10-NEXT: ret i32 [[RES]]
+;
+; GFX11-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
+; GFX11-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; GFX11-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
+; GFX11-NEXT: ret i32 [[RES]]
+;
+; GFX942-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
+; GFX942-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; GFX942-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
+; GFX942-NEXT: ret i32 [[RES]]
+;
+; GFX12-LABEL: define i32 @test_atomicrmw_or_0_global_agent(
+; GFX12-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; GFX12-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 0 syncscope("agent") seq_cst, align 4
+; GFX12-NEXT: ret i32 [[RES]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 0 syncscope("agent") seq_cst
ret i32 %res
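
At agent scope, the idempotent "or 0" is now rewritten per subtarget: GFX803 through GFX11 canonicalize it to "add 0", while GFX942 and GFX12 leave the "or" alone. The rewrite is semantically free, since both forms return the prior contents and store back an unchanged value; what differs is which opcode the target can execute as a single hardware atomic across more memory configurations (atomic add is generally the safer of the two). A minimal sketch of the equivalence:

  ; both return the prior contents and leave the stored value unchanged
  %old1 = atomicrmw or  ptr addrspace(1) %p, i32 0 syncscope("agent") seq_cst
  %old2 = atomicrmw add ptr addrspace(1) %p, i32 0 syncscope("agent") seq_cst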
@@ -164,7 +194,7 @@ define i32 @test_atomicrmw_xor_0_global_system(ptr addrspace(1) %ptr) {
define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_fine_grained_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -174,7 +204,7 @@ define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_fine_grained_memory(ptr
define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_remote_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.remote.memory !0
@@ -184,7 +214,7 @@ define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_remote_memory(ptr addrs
define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw or ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw or ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -194,7 +224,7 @@ define i32 @test_atomicrmw_or_0_global_system__amdgpu_no_fine_grained_memory__am
define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_fine_grained_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -204,7 +234,7 @@ define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_fine_grained_memory(pt
define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_remote_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.remote.memory !0
@@ -214,7 +244,7 @@ define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_remote_memory(ptr addr
define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw xor ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw xor ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
@@ -224,7 +254,7 @@ define i32 @test_atomicrmw_xor_0_global_system__amdgpu_no_fine_grained_memory__a
define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_fine_grained_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_fine_grained_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -234,7 +264,7 @@ define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_fine_grained_memory(pt
define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_remote_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_remote_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.remote.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.remote.memory !0
@@ -244,7 +274,7 @@ define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_remote_memory(ptr addr
define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_sub_0_global_system__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory(
; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
+; CHECK-NEXT: [[RES:%.*]] = atomicrmw sub ptr addrspace(1) [[PTR]], i32 0 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META0]], !amdgpu.no.remote.memory [[META0]]
; CHECK-NEXT: ret i32 [[RES]]
;
%res = atomicrmw sub ptr addrspace(1) %ptr, i32 0 seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0
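
These system-scope tests move in the opposite direction: once !amdgpu.no.fine.grained.memory and/or !amdgpu.no.remote.memory are attached, the original or/xor/sub is apparently selectable as-is, so the 0-to-add canonicalization no longer fires and the checks revert to the original opcode. Following the pattern visible in these tests, a frontend asserting both properties would emit:

  ; !0 is defined as !{} at file scope
  %res = atomicrmw xor ptr addrspace(1) %ptr, i32 %v seq_cst, !amdgpu.no.fine.grained.memory !0, !amdgpu.no.remote.memory !0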
@@ -342,12 +372,3 @@ define i32 @test_atomicrmw_sub_0_global_agent__amdgpu_no_fine_grained_memory__am
}
!0 = !{}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10: {{.*}}
-; GFX11: {{.*}}
-; GFX12: {{.*}}
-; GFX803: {{.*}}
-; GFX900: {{.*}}
-; GFX90A: {{.*}}
-; GFX942: {{.*}}